; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx900 -stop-after=legalizer -o - %s | FileCheck -check-prefix=GFX9 %s
; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1010 -stop-after=legalizer -o - %s | FileCheck -check-prefix=GFX10 %s
; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1100 -stop-after=legalizer -o - %s | FileCheck -check-prefix=GFX11 %s
; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 -stop-after=legalizer -o - %s | FileCheck -check-prefix=GFX12 %s

define amdgpu_ps <4 x float> @sample_1d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, half %s) {
; GFX9-LABEL: name: sample_1d
; GFX9: bb.1.main_body:
; GFX9-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0
; GFX9-NEXT: {{  $}}
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX9-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX9-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX9-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX9-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX9-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX9-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX9-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX9-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX9-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.1d), 15, [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX9-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX9-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX9-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX9-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX10-LABEL: name: sample_1d
; GFX10: bb.1.main_body:
; GFX10-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0
; GFX10-NEXT: {{  $}}
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX10-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX10-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX10-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX10-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX10-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX10-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX10-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.1d), 15, [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX10-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX10-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX10-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX11-LABEL: name: sample_1d
; GFX11: bb.1.main_body:
; GFX11-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0
; GFX11-NEXT: {{  $}}
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX11-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX11-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX11-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX11-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX11-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX11-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX11-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX11-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX11-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX11-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX11-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX11-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX11-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX11-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.1d), 15, [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX11-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX11-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX11-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX11-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX12-LABEL: name: sample_1d
; GFX12: bb.1.main_body:
; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0
; GFX12-NEXT: {{  $}}
; GFX12-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX12-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX12-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX12-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX12-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX12-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX12-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX12-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX12-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX12-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX12-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX12-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX12-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX12-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX12-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX12-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX12-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX12-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX12-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.1d), 15, [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX12-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX12-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX12-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX12-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX12-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
main_body:
  %v = call <4 x float> @llvm.amdgcn.image.sample.1d.v4f32.f16(i32 15, half %s, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
  ret <4 x float> %v
}

define amdgpu_ps <4 x float> @sample_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, half %s, half %t) {
; GFX9-LABEL: name: sample_2d
; GFX9: bb.1.main_body:
; GFX9-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1
; GFX9-NEXT: {{  $}}
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX9-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX9-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX9-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX9-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX9-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX9-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX9-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX9-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX9-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.2d), 15, [[BUILD_VECTOR2]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX9-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX9-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX9-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX9-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX10-LABEL: name: sample_2d
; GFX10: bb.1.main_body:
; GFX10-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1
; GFX10-NEXT: {{  $}}
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX10-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX10-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX10-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX10-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX10-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX10-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX10-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.2d), 15, [[BUILD_VECTOR2]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX10-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX10-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX10-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX11-LABEL: name: sample_2d
; GFX11: bb.1.main_body:
; GFX11-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1
; GFX11-NEXT: {{  $}}
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX11-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX11-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX11-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX11-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX11-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX11-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX11-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX11-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX11-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX11-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX11-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX11-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX11-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX11-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.2d), 15, [[BUILD_VECTOR2]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX11-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX11-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX11-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX11-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX12-LABEL: name: sample_2d
; GFX12: bb.1.main_body:
; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1
; GFX12-NEXT: {{  $}}
; GFX12-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX12-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX12-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX12-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX12-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX12-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX12-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX12-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX12-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX12-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX12-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX12-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX12-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX12-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX12-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX12-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX12-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX12-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX12-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX12-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.2d), 15, [[BUILD_VECTOR2]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX12-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX12-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX12-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX12-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX12-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
main_body:
  %v = call <4 x float> @llvm.amdgcn.image.sample.2d.v4f32.f16(i32 15, half %s, half %t, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
  ret <4 x float> %v
}

define amdgpu_ps <4 x float> @sample_3d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, half %s, half %t, half %r) {
; GFX9-LABEL: name: sample_3d
; GFX9: bb.1.main_body:
; GFX9-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
; GFX9-NEXT: {{  $}}
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX9-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX9-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX9-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX9-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX9-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX9-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX9-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX9-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX9-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX9-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX9-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX9-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF]](s16)
; GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>)
; GFX9-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.3d), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX9-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX9-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX9-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX9-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX10-LABEL: name: sample_3d
; GFX10: bb.1.main_body:
; GFX10-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
; GFX10-NEXT: {{  $}}
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX10-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX10-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX10-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX10-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX10-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX10-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX10-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX10-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX10-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX10-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF]](s16)
; GFX10-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>)
; GFX10-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.3d), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX10-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX10-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX10-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX11-LABEL: name: sample_3d
; GFX11: bb.1.main_body:
; GFX11-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
; GFX11-NEXT: {{  $}}
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX11-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX11-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX11-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX11-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX11-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX11-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX11-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX11-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX11-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX11-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX11-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX11-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX11-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX11-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX11-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX11-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX11-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF]](s16)
; GFX11-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>)
; GFX11-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.3d), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX11-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX11-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX11-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX11-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX12-LABEL: name: sample_3d
; GFX12: bb.1.main_body:
; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
; GFX12-NEXT: {{  $}}
; GFX12-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX12-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX12-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX12-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX12-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX12-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX12-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX12-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX12-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX12-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX12-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX12-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX12-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX12-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX12-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX12-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX12-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX12-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX12-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX12-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX12-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX12-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX12-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF]](s16)
; GFX12-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.3d), 15, [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX12-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX12-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX12-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX12-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX12-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
main_body:
  %v = call <4 x float> @llvm.amdgcn.image.sample.3d.v4f32.f16(i32 15, half %s, half %t, half %r, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
  ret <4 x float> %v
}

define amdgpu_ps <4 x float> @sample_cube(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, half %s, half %t, half %face) {
; GFX9-LABEL: name: sample_cube
; GFX9: bb.1.main_body:
; GFX9-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
; GFX9-NEXT: {{  $}}
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX9-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX9-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX9-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX9-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX9-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX9-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX9-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX9-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX9-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX9-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX9-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX9-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF]](s16)
; GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>)
; GFX9-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.cube), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX9-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX9-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX9-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX9-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX10-LABEL: name: sample_cube
; GFX10: bb.1.main_body:
; GFX10-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
; GFX10-NEXT: {{  $}}
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX10-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX10-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX10-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX10-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX10-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX10-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX10-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX10-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX10-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX10-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF]](s16)
; GFX10-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>)
; GFX10-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.cube), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX10-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX10-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX10-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX11-LABEL: name: sample_cube
; GFX11: bb.1.main_body:
; GFX11-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
; GFX11-NEXT: {{  $}}
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX11-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX11-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX11-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX11-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX11-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX11-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX11-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX11-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX11-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX11-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX11-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX11-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX11-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX11-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX11-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX11-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX11-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF]](s16)
; GFX11-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>)
; GFX11-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.cube), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX11-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX11-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX11-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX11-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX12-LABEL: name: sample_cube
; GFX12: bb.1.main_body:
; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
; GFX12-NEXT: {{  $}}
; GFX12-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX12-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX12-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX12-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX12-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX12-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX12-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX12-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX12-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX12-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX12-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX12-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX12-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX12-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX12-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX12-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX12-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX12-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX12-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX12-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX12-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX12-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX12-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF]](s16)
; GFX12-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.cube), 15, [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX12-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX12-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX12-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX12-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX12-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
main_body:
  %v = call <4 x float> @llvm.amdgcn.image.sample.cube.v4f32.f16(i32 15, half %s, half %t, half %face, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
  ret <4 x float> %v
}

define amdgpu_ps <4 x float> @sample_1darray(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, half %s, half %slice) {
; GFX9-LABEL: name: sample_1darray
; GFX9: bb.1.main_body:
; GFX9-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1
; GFX9-NEXT: {{  $}}
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX9-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX9-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX9-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX9-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX9-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX9-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX9-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX9-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX9-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.1darray), 15, [[BUILD_VECTOR2]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX9-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX9-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX9-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX9-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX10-LABEL: name: sample_1darray
; GFX10: bb.1.main_body:
; GFX10-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1
; GFX10-NEXT: {{  $}}
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX10-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX10-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX10-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX10-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX10-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX10-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX10-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.1darray), 15, [[BUILD_VECTOR2]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX10-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX10-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX10-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX11-LABEL: name: sample_1darray
; GFX11: bb.1.main_body:
; GFX11-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1
; GFX11-NEXT: {{  $}}
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX11-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX11-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX11-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX11-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX11-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX11-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX11-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX11-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX11-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX11-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX11-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX11-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX11-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX11-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.1darray), 15, [[BUILD_VECTOR2]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX11-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX11-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX11-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX11-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX12-LABEL: name: sample_1darray
; GFX12: bb.1.main_body:
; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1
; GFX12-NEXT: {{  $}}
; GFX12-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX12-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX12-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX12-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX12-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX12-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX12-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX12-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX12-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX12-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX12-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX12-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX12-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX12-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX12-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX12-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX12-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX12-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX12-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX12-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.1darray), 15, [[BUILD_VECTOR2]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX12-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX12-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX12-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX12-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX12-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
main_body:
  %v = call <4 x float> @llvm.amdgcn.image.sample.1darray.v4f32.f16(i32 15, half %s, half %slice, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
  ret <4 x float> %v
}

define amdgpu_ps <4 x float> @sample_2darray(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, half %s, half %t, half %slice) {
; GFX9-LABEL: name: sample_2darray
; GFX9: bb.1.main_body:
; GFX9-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
; GFX9-NEXT: {{  $}}
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX9-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX9-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX9-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX9-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX9-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX9-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX9-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX9-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX9-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX9-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX9-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX9-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF]](s16)
; GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>)
; GFX9-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.2darray), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX9-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX9-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX9-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX9-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX10-LABEL: name: sample_2darray
; GFX10: bb.1.main_body:
; GFX10-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
; GFX10-NEXT: {{  $}}
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX10-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX10-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX10-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX10-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX10-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX10-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX10-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX10-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX10-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX10-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF]](s16)
; GFX10-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>)
; GFX10-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.2darray), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX10-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX10-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX10-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX11-LABEL: name: sample_2darray
; GFX11: bb.1.main_body:
; GFX11-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
; GFX11-NEXT: {{  $}}
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX11-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX11-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX11-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX11-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX11-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX11-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX11-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX11-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX11-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX11-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX11-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX11-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX11-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX11-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX11-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX11-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX11-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF]](s16)
; GFX11-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>)
; GFX11-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.2darray), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX11-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX11-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX11-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX11-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX12-LABEL: name: sample_2darray
; GFX12: bb.1.main_body:
; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
; GFX12-NEXT: {{  $}}
; GFX12-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX12-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
801 ; GFX12-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
802 ; GFX12-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
803 ; GFX12-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
804 ; GFX12-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
805 ; GFX12-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
806 ; GFX12-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
807 ; GFX12-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
808 ; GFX12-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
809 ; GFX12-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
810 ; GFX12-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
811 ; GFX12-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
812 ; GFX12-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
813 ; GFX12-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
814 ; GFX12-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
815 ; GFX12-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
816 ; GFX12-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
817 ; GFX12-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
818 ; GFX12-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
819 ; GFX12-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
820 ; GFX12-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
821 ; GFX12-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF]](s16)
822 ; GFX12-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.2darray), 15, [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
823 ; GFX12-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
824 ; GFX12-NEXT: $vgpr0 = COPY [[UV]](s32)
825 ; GFX12-NEXT: $vgpr1 = COPY [[UV1]](s32)
826 ; GFX12-NEXT: $vgpr2 = COPY [[UV2]](s32)
827 ; GFX12-NEXT: $vgpr3 = COPY [[UV3]](s32)
828 ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
830 %v = call <4 x float> @llvm.amdgcn.image.sample.2darray.v4f32.f16(i32 15, half %s, half %t, half %slice, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
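; Depth-compare variant: the f32 %zcompare operand is bitcast to <2 x s16>
; (not truncated), and the f16 coordinate is padded with an undef half.
; GFX9-GFX11 concatenate the two registers into a single <4 x s16> VADDR
; operand, while GFX12 passes them as two separate VADDR operands.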
define amdgpu_ps <4 x float> @sample_c_1d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %zcompare, half %s) {
; GFX9-LABEL: name: sample_c_1d
; GFX9: bb.1.main_body:
; GFX9-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1
; GFX9-NEXT: {{ $}}
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX9-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX9-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX9-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX9-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX9-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX9-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX9-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX9-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX9-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>)
; GFX9-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.1d), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX9-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX9-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX9-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX9-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX10-LABEL: name: sample_c_1d
; GFX10: bb.1.main_body:
; GFX10-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX10-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX10-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX10-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX10-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX10-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX10-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX10-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX10-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>)
; GFX10-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.1d), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX10-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX10-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX10-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX11-LABEL: name: sample_c_1d
; GFX11: bb.1.main_body:
; GFX11-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1
; GFX11-NEXT: {{ $}}
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX11-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX11-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX11-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX11-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX11-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX11-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX11-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX11-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX11-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX11-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX11-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX11-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX11-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX11-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX11-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX11-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>)
; GFX11-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.1d), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX11-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX11-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX11-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX11-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX12-LABEL: name: sample_c_1d
; GFX12: bb.1.main_body:
; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1
; GFX12-NEXT: {{ $}}
; GFX12-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX12-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX12-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX12-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX12-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX12-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX12-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX12-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX12-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX12-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX12-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX12-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX12-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX12-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX12-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX12-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX12-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX12-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX12-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX12-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX12-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.1d), 15, [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX12-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX12-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX12-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX12-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX12-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
main_body:
  %v = call <4 x float> @llvm.amdgcn.image.sample.c.1d.v4f32.f16(i32 15, float %zcompare, half %s, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
  ret <4 x float> %v
}

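; Depth compare plus two f16 coordinates: %zcompare is bitcast to <2 x s16>
; and s/t are packed into a second <2 x s16>, so no undef padding is needed.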
define amdgpu_ps <4 x float> @sample_c_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %zcompare, half %s, half %t) {
; GFX9-LABEL: name: sample_c_2d
; GFX9: bb.1.main_body:
; GFX9-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
; GFX9-NEXT: {{ $}}
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX9-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX9-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX9-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX9-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX9-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX9-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX9-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX9-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX9-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>)
; GFX9-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.2d), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX9-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX9-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX9-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX9-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX10-LABEL: name: sample_c_2d
; GFX10: bb.1.main_body:
; GFX10-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX10-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX10-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX10-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX10-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX10-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX10-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX10-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX10-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>)
; GFX10-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.2d), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX10-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX10-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX10-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX11-LABEL: name: sample_c_2d
; GFX11: bb.1.main_body:
; GFX11-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
; GFX11-NEXT: {{ $}}
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX11-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX11-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX11-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX11-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX11-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX11-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX11-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX11-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX11-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX11-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX11-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX11-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX11-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX11-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX11-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX11-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>)
; GFX11-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.2d), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX11-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX11-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX11-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX11-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX12-LABEL: name: sample_c_2d
; GFX12: bb.1.main_body:
; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
; GFX12-NEXT: {{ $}}
; GFX12-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX12-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX12-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX12-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX12-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX12-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX12-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX12-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX12-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX12-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX12-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX12-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX12-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX12-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX12-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX12-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX12-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX12-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX12-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX12-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX12-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX12-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.2d), 15, [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX12-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX12-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX12-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX12-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX12-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
main_body:
  %v = call <4 x float> @llvm.amdgcn.image.sample.c.2d.v4f32.f16(i32 15, float %zcompare, half %s, half %t, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
  ret <4 x float> %v
}

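; LOD-clamp variant: %s and %clamp pack into a single <2 x s16>, so the
; address fits in one register on all targets and no G_CONCAT_VECTORS is
; needed.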
define amdgpu_ps <4 x float> @sample_cl_1d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, half %s, half %clamp) {
; GFX9-LABEL: name: sample_cl_1d
; GFX9: bb.1.main_body:
; GFX9-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1
; GFX9-NEXT: {{ $}}
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX9-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX9-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX9-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX9-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX9-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX9-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX9-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX9-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX9-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.cl.1d), 15, [[BUILD_VECTOR2]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX9-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX9-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX9-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX9-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX10-LABEL: name: sample_cl_1d
; GFX10: bb.1.main_body:
; GFX10-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX10-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX10-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX10-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX10-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX10-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX10-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX10-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.cl.1d), 15, [[BUILD_VECTOR2]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX10-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX10-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX10-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX11-LABEL: name: sample_cl_1d
; GFX11: bb.1.main_body:
; GFX11-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1
; GFX11-NEXT: {{ $}}
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX11-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX11-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX11-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX11-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX11-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX11-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX11-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX11-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX11-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX11-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX11-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX11-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX11-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX11-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.cl.1d), 15, [[BUILD_VECTOR2]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX11-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX11-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX11-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX11-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX12-LABEL: name: sample_cl_1d
; GFX12: bb.1.main_body:
; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1
; GFX12-NEXT: {{ $}}
; GFX12-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX12-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX12-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX12-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX12-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX12-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX12-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX12-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX12-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX12-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX12-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX12-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX12-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX12-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX12-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX12-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX12-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX12-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX12-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX12-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.cl.1d), 15, [[BUILD_VECTOR2]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX12-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX12-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX12-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX12-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX12-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
main_body:
  %v = call <4 x float> @llvm.amdgcn.image.sample.cl.1d.v4f32.f16(i32 15, half %s, half %clamp, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
  ret <4 x float> %v
}

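; Three f16 inputs (s, t, clamp): s/t share one <2 x s16> and clamp is padded
; with an undef half. GFX9-GFX11 concatenate into a <4 x s16>; GFX12 passes
; two separate VADDR operands.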
define amdgpu_ps <4 x float> @sample_cl_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, half %s, half %t, half %clamp) {
; GFX9-LABEL: name: sample_cl_2d
; GFX9: bb.1.main_body:
; GFX9-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
; GFX9-NEXT: {{ $}}
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX9-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX9-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX9-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX9-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX9-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX9-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX9-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX9-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX9-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX9-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX9-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX9-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF]](s16)
; GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>)
; GFX9-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.cl.2d), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX9-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX9-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX9-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX9-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX10-LABEL: name: sample_cl_2d
; GFX10: bb.1.main_body:
; GFX10-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX10-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX10-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX10-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX10-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX10-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX10-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX10-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX10-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX10-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX10-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF]](s16)
; GFX10-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>)
; GFX10-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.cl.2d), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX10-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX10-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX10-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX11-LABEL: name: sample_cl_2d
; GFX11: bb.1.main_body:
; GFX11-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
; GFX11-NEXT: {{ $}}
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX11-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX11-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX11-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX11-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX11-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX11-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX11-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX11-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX11-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX11-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX11-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX11-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX11-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX11-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX11-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX11-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX11-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF]](s16)
; GFX11-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>)
; GFX11-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.cl.2d), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX11-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX11-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX11-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX11-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX12-LABEL: name: sample_cl_2d
; GFX12: bb.1.main_body:
; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
; GFX12-NEXT: {{ $}}
; GFX12-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX12-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX12-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX12-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX12-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX12-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX12-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX12-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX12-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX12-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX12-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX12-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX12-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX12-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX12-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX12-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX12-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX12-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX12-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX12-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX12-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX12-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX12-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF]](s16)
; GFX12-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.cl.2d), 15, [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX12-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX12-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX12-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX12-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX12-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
main_body:
  %v = call <4 x float> @llvm.amdgcn.image.sample.cl.2d.v4f32.f16(i32 15, half %s, half %t, half %clamp, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
  ret <4 x float> %v
}

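; Depth compare plus clamp: the bitcast f32 %zcompare and the packed s/clamp
; pair again form two <2 x s16> registers (concatenated on GFX9-GFX11,
; separate VADDR operands on GFX12).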
1387 define amdgpu_ps <4 x float> @sample_c_cl_1d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %zcompare, half %s, half %clamp) {
1388 ; GFX9-LABEL: name: sample_c_cl_1d
1389 ; GFX9: bb.1.main_body:
1390 ; GFX9-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
1392 ; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
1393 ; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
1394 ; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
1395 ; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
1396 ; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
1397 ; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
1398 ; GFX9-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
1399 ; GFX9-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
1400 ; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
1401 ; GFX9-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
1402 ; GFX9-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
1403 ; GFX9-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
1404 ; GFX9-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
1405 ; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
1406 ; GFX9-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
1407 ; GFX9-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
1408 ; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
1409 ; GFX9-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
1410 ; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
1411 ; GFX9-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
1412 ; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
1413 ; GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>)
1414 ; GFX9-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.cl.1d), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
1415 ; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
1416 ; GFX9-NEXT: $vgpr0 = COPY [[UV]](s32)
1417 ; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
1418 ; GFX9-NEXT: $vgpr2 = COPY [[UV2]](s32)
1419 ; GFX9-NEXT: $vgpr3 = COPY [[UV3]](s32)
1420 ; GFX9-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
1422 ; GFX10-LABEL: name: sample_c_cl_1d
1423 ; GFX10: bb.1.main_body:
1424 ; GFX10-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
1425 ; GFX10-NEXT: {{ $}}
1426 ; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
1427 ; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
1428 ; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
1429 ; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
1430 ; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
1431 ; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
1432 ; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
1433 ; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
1434 ; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
1435 ; GFX10-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
1436 ; GFX10-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
1437 ; GFX10-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
1438 ; GFX10-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
1439 ; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
1440 ; GFX10-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
1441 ; GFX10-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
1442 ; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
1443 ; GFX10-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
1444 ; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
1445 ; GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
1446 ; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
1447 ; GFX10-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>)
1448 ; GFX10-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.cl.1d), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
1449 ; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
1450 ; GFX10-NEXT: $vgpr0 = COPY [[UV]](s32)
1451 ; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
1452 ; GFX10-NEXT: $vgpr2 = COPY [[UV2]](s32)
1453 ; GFX10-NEXT: $vgpr3 = COPY [[UV3]](s32)
1454 ; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
1456 ; GFX11-LABEL: name: sample_c_cl_1d
1457 ; GFX11: bb.1.main_body:
1458 ; GFX11-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
1459 ; GFX11-NEXT: {{ $}}
1460 ; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
1461 ; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
1462 ; GFX11-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
1463 ; GFX11-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
1464 ; GFX11-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
1465 ; GFX11-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
1466 ; GFX11-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
1467 ; GFX11-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
1468 ; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
1469 ; GFX11-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
1470 ; GFX11-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
1471 ; GFX11-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
1472 ; GFX11-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
1473 ; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
1474 ; GFX11-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
1475 ; GFX11-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
1476 ; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
1477 ; GFX11-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
1478 ; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
1479 ; GFX11-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
1480 ; GFX11-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
1481 ; GFX11-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>)
1482 ; GFX11-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.cl.1d), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
1483 ; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
1484 ; GFX11-NEXT: $vgpr0 = COPY [[UV]](s32)
1485 ; GFX11-NEXT: $vgpr1 = COPY [[UV1]](s32)
1486 ; GFX11-NEXT: $vgpr2 = COPY [[UV2]](s32)
1487 ; GFX11-NEXT: $vgpr3 = COPY [[UV3]](s32)
1488 ; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
; GFX12-LABEL: name: sample_c_cl_1d
; GFX12: bb.1.main_body:
; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
; GFX12-NEXT: {{ $}}
; GFX12-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX12-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX12-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX12-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX12-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX12-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX12-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX12-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX12-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX12-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX12-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX12-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX12-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX12-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX12-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX12-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX12-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX12-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX12-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX12-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX12-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX12-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.cl.1d), 15, [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX12-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX12-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX12-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX12-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX12-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
main_body:
  %v = call <4 x float> @llvm.amdgcn.image.sample.c.cl.1d.v4f32.f16(i32 15, float %zcompare, half %s, half %clamp, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
  ret <4 x float> %v
}
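
; Depth compare + LOD clamp, 2D: the f32 %zcompare is kept as a full dword (bitcast to <2 x s16>) while %s, %t and %clamp are truncated to halves and packed.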
define amdgpu_ps <4 x float> @sample_c_cl_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %zcompare, half %s, half %t, half %clamp) {
; GFX9-LABEL: name: sample_c_cl_2d
; GFX9: bb.1.main_body:
; GFX9-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
; GFX9-NEXT: {{ $}}
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX9-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX9-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX9-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX9-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX9-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX9-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX9-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX9-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX9-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX9-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX9-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX9-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX9-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF]](s16)
; GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<6 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>)
; GFX9-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.cl.2d), 15, [[CONCAT_VECTORS]](<6 x s16>), $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX9-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX9-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX9-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX9-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX10-LABEL: name: sample_c_cl_2d
; GFX10: bb.1.main_body:
; GFX10-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX10-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX10-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX10-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX10-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX10-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX10-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX10-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX10-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX10-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX10-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX10-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF]](s16)
; GFX10-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.cl.2d), 15, [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX10-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX10-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX10-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX11-LABEL: name: sample_c_cl_2d
; GFX11: bb.1.main_body:
; GFX11-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
; GFX11-NEXT: {{ $}}
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX11-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX11-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX11-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX11-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX11-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX11-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX11-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX11-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX11-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX11-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX11-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX11-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX11-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX11-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX11-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX11-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX11-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX11-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX11-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF]](s16)
; GFX11-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.cl.2d), 15, [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX11-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX11-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX11-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX11-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX12-LABEL: name: sample_c_cl_2d
; GFX12: bb.1.main_body:
; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
; GFX12-NEXT: {{ $}}
; GFX12-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX12-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX12-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX12-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX12-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX12-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX12-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX12-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX12-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX12-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX12-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX12-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX12-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX12-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX12-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX12-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX12-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX12-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX12-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX12-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX12-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX12-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX12-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX12-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX12-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF]](s16)
; GFX12-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.cl.2d), 15, [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX12-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX12-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX12-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX12-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX12-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
main_body:
  %v = call <4 x float> @llvm.amdgcn.image.sample.c.cl.2d.v4f32.f16(i32 15, float %zcompare, half %s, half %t, half %clamp, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
  ret <4 x float> %v
}
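
; LOD bias, 1D: %bias and %s are both halves; each lands in its own <2 x s16> register padded with an undef half.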
define amdgpu_ps <4 x float> @sample_b_1d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, half %bias, half %s) {
; GFX9-LABEL: name: sample_b_1d
; GFX9: bb.1.main_body:
; GFX9-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1
; GFX9-NEXT: {{ $}}
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX9-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX9-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX9-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX9-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX9-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX9-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX9-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX9-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX9-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX9-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[DEF]](s16)
; GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>)
; GFX9-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.b.1d), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX9-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX9-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX9-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX9-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX10-LABEL: name: sample_b_1d
; GFX10: bb.1.main_body:
; GFX10-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX10-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX10-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX10-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX10-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX10-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX10-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX10-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX10-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[DEF]](s16)
; GFX10-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>)
; GFX10-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.b.1d), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX10-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX10-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX10-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX11-LABEL: name: sample_b_1d
; GFX11: bb.1.main_body:
; GFX11-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1
; GFX11-NEXT: {{ $}}
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX11-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX11-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX11-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX11-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX11-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX11-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX11-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX11-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX11-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX11-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX11-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX11-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX11-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX11-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX11-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[DEF]](s16)
; GFX11-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>)
; GFX11-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.b.1d), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX11-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX11-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX11-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX11-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX12-LABEL: name: sample_b_1d
; GFX12: bb.1.main_body:
; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1
; GFX12-NEXT: {{ $}}
; GFX12-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX12-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX12-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX12-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX12-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX12-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX12-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX12-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX12-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX12-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX12-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX12-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX12-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX12-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX12-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX12-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX12-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX12-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX12-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX12-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX12-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[DEF]](s16)
; GFX12-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.b.1d), 15, [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX12-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX12-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX12-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX12-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX12-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
main_body:
  %v = call <4 x float> @llvm.amdgcn.image.sample.b.1d.v4f32.f16.f16(i32 15, half %bias, half %s, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
  ret <4 x float> %v
}
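
; LOD bias, 2D: %bias is padded with an undef half while %s and %t share one <2 x s16> register.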
define amdgpu_ps <4 x float> @sample_b_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, half %bias, half %s, half %t) {
; GFX9-LABEL: name: sample_b_2d
; GFX9: bb.1.main_body:
; GFX9-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
; GFX9-NEXT: {{ $}}
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX9-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX9-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX9-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX9-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX9-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX9-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX9-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX9-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX9-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX9-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX9-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX9-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[TRUNC2]](s16)
; GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>)
; GFX9-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.b.2d), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX9-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX9-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX9-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX9-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX10-LABEL: name: sample_b_2d
; GFX10: bb.1.main_body:
; GFX10-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX10-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX10-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX10-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX10-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX10-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX10-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX10-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX10-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX10-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX10-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[TRUNC2]](s16)
; GFX10-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>)
; GFX10-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.b.2d), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX10-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX10-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX10-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX11-LABEL: name: sample_b_2d
; GFX11: bb.1.main_body:
; GFX11-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
; GFX11-NEXT: {{ $}}
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX11-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX11-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX11-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX11-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX11-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX11-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX11-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX11-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX11-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX11-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX11-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX11-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX11-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX11-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX11-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX11-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX11-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[TRUNC2]](s16)
; GFX11-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>)
; GFX11-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.b.2d), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX11-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX11-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX11-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX11-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX12-LABEL: name: sample_b_2d
; GFX12: bb.1.main_body:
; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
; GFX12-NEXT: {{ $}}
; GFX12-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX12-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX12-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX12-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX12-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX12-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX12-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX12-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX12-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX12-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX12-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX12-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX12-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX12-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX12-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX12-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX12-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX12-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX12-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX12-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX12-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX12-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX12-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[TRUNC2]](s16)
; GFX12-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.b.2d), 15, [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX12-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX12-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX12-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX12-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX12-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
main_body:
  %v = call <4 x float> @llvm.amdgcn.image.sample.b.2d.v4f32.f16.f16(i32 15, half %bias, half %s, half %t, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
  ret <4 x float> %v
}
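
; Depth compare + LOD bias, 1D: padded half %bias, then the f32 %zcompare bitcast to <2 x s16>, then %s padded with an undef half.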
define amdgpu_ps <4 x float> @sample_c_b_1d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, half %bias, float %zcompare, half %s) {
; GFX9-LABEL: name: sample_c_b_1d
; GFX9: bb.1.main_body:
; GFX9-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
; GFX9-NEXT: {{ $}}
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX9-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX9-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX9-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX9-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX9-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX9-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX9-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX9-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX9-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX9-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX9-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY13]](s32)
; GFX9-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[DEF]](s16)
; GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<6 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR2]](<2 x s16>), [[BITCAST]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>)
; GFX9-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.b.1d), 15, [[CONCAT_VECTORS]](<6 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX9-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX9-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX9-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX9-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX10-LABEL: name: sample_c_b_1d
; GFX10: bb.1.main_body:
; GFX10-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX10-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX10-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX10-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX10-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX10-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX10-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX10-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX10-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY13]](s32)
; GFX10-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[DEF]](s16)
; GFX10-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.b.1d), 15, [[BUILD_VECTOR2]](<2 x s16>), [[BITCAST]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX10-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX10-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX10-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX11-LABEL: name: sample_c_b_1d
; GFX11: bb.1.main_body:
; GFX11-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
; GFX11-NEXT: {{ $}}
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX11-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX11-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX11-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX11-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX11-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX11-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX11-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX11-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX11-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX11-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX11-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX11-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX11-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX11-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX11-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX11-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY13]](s32)
; GFX11-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[DEF]](s16)
; GFX11-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.b.1d), 15, [[BUILD_VECTOR2]](<2 x s16>), [[BITCAST]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX11-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX11-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX11-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX11-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX12-LABEL: name: sample_c_b_1d
; GFX12: bb.1.main_body:
; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
; GFX12-NEXT: {{ $}}
; GFX12-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX12-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX12-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX12-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX12-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX12-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX12-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX12-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX12-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX12-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX12-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX12-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX12-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX12-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX12-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX12-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX12-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX12-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX12-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX12-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX12-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX12-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY13]](s32)
; GFX12-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[DEF]](s16)
; GFX12-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.b.1d), 15, [[BUILD_VECTOR2]](<2 x s16>), [[BITCAST]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX12-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX12-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX12-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX12-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX12-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
main_body:
  %v = call <4 x float> @llvm.amdgcn.image.sample.c.b.1d.v4f32.f16.f16(i32 15, half %bias, float %zcompare, half %s, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
  ret <4 x float> %v
}
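
; Depth compare + LOD bias, 2D: padded half %bias, the f32 %zcompare bitcast to <2 x s16>, then the %s/%t pair.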
define amdgpu_ps <4 x float> @sample_c_b_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, half %bias, float %zcompare, half %s, half %t) {
; GFX9-LABEL: name: sample_c_b_2d
; GFX9: bb.1.main_body:
; GFX9-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
; GFX9-NEXT: {{ $}}
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX9-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX9-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX9-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX9-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX9-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX9-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX9-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX9-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX9-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX9-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX9-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX9-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX9-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY13]](s32)
; GFX9-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[TRUNC2]](s16)
; GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<6 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR2]](<2 x s16>), [[BITCAST]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>)
; GFX9-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.b.2d), 15, [[CONCAT_VECTORS]](<6 x s16>), $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX9-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX9-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX9-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX9-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX10-LABEL: name: sample_c_b_2d
; GFX10: bb.1.main_body:
; GFX10-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX10-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX10-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX10-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX10-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX10-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX10-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX10-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX10-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX10-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX10-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY13]](s32)
; GFX10-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[TRUNC2]](s16)
; GFX10-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.b.2d), 15, [[BUILD_VECTOR2]](<2 x s16>), [[BITCAST]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX10-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX10-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX10-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX11-LABEL: name: sample_c_b_2d
; GFX11: bb.1.main_body:
; GFX11-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
; GFX11-NEXT: {{ $}}
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX11-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX11-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX11-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX11-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX11-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX11-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX11-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX11-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX11-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX11-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX11-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX11-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX11-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX11-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX11-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX11-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX11-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX11-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY13]](s32)
; GFX11-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[TRUNC2]](s16)
; GFX11-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.b.2d), 15, [[BUILD_VECTOR2]](<2 x s16>), [[BITCAST]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX11-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX11-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX11-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX11-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX12-LABEL: name: sample_c_b_2d
; GFX12: bb.1.main_body:
; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
; GFX12-NEXT: {{ $}}
; GFX12-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX12-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX12-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX12-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX12-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX12-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX12-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX12-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX12-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX12-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX12-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX12-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX12-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX12-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX12-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX12-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX12-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX12-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX12-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX12-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX12-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX12-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX12-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX12-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY13]](s32)
; GFX12-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[TRUNC2]](s16)
; GFX12-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.b.2d), 15, [[BUILD_VECTOR2]](<2 x s16>), [[BITCAST]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX12-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX12-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX12-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX12-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX12-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
main_body:
  %v = call <4 x float> @llvm.amdgcn.image.sample.c.b.2d.v4f32.f16.f16(i32 15, half %bias, float %zcompare, half %s, half %t, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
  ret <4 x float> %v
}

define amdgpu_ps <4 x float> @sample_b_cl_1d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, half %bias, half %s, half %clamp) {
; GFX9-LABEL: name: sample_b_cl_1d
; GFX9: bb.1.main_body:
; GFX9-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
; GFX9-NEXT: {{ $}}
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX9-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX9-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX9-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX9-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX9-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX9-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX9-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX9-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX9-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX9-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX9-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX9-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[TRUNC2]](s16)
; GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>)
; GFX9-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.b.cl.1d), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX9-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX9-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX9-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX9-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX10-LABEL: name: sample_b_cl_1d
; GFX10: bb.1.main_body:
; GFX10-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX10-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX10-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX10-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX10-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX10-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX10-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX10-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX10-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX10-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX10-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[TRUNC2]](s16)
; GFX10-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>)
; GFX10-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.b.cl.1d), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX10-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX10-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX10-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX11-LABEL: name: sample_b_cl_1d
; GFX11: bb.1.main_body:
; GFX11-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
; GFX11-NEXT: {{ $}}
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX11-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX11-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX11-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX11-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX11-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX11-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX11-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX11-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX11-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX11-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX11-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX11-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX11-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX11-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX11-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX11-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX11-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[TRUNC2]](s16)
; GFX11-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>)
; GFX11-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.b.cl.1d), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX11-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX11-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX11-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX11-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX12-LABEL: name: sample_b_cl_1d
; GFX12: bb.1.main_body:
; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
; GFX12-NEXT: {{ $}}
; GFX12-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX12-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX12-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX12-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX12-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX12-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX12-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX12-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX12-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX12-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX12-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX12-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX12-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX12-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX12-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX12-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX12-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX12-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX12-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX12-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX12-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX12-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX12-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[TRUNC2]](s16)
; GFX12-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.b.cl.1d), 15, [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX12-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX12-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX12-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX12-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX12-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
main_body:
  %v = call <4 x float> @llvm.amdgcn.image.sample.b.cl.1d.v4f32.f16.f16(i32 15, half %bias, half %s, half %clamp, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
  ret <4 x float> %v
}

define amdgpu_ps <4 x float> @sample_b_cl_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, half %bias, half %s, half %t, half %clamp) {
; GFX9-LABEL: name: sample_b_cl_2d
; GFX9: bb.1.main_body:
; GFX9-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
; GFX9-NEXT: {{ $}}
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX9-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX9-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX9-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX9-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX9-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX9-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX9-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX9-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX9-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX9-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX9-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX9-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX9-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX9-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[TRUNC2]](s16)
; GFX9-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC3]](s16), [[DEF]](s16)
; GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<6 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>)
; GFX9-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.b.cl.2d), 15, [[CONCAT_VECTORS]](<6 x s16>), $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX9-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX9-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX9-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX9-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX10-LABEL: name: sample_b_cl_2d
; GFX10: bb.1.main_body:
; GFX10-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX10-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX10-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX10-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX10-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX10-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX10-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX10-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX10-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX10-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX10-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX10-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX10-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[TRUNC2]](s16)
; GFX10-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC3]](s16), [[DEF]](s16)
; GFX10-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.b.cl.2d), 15, [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX10-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX10-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX10-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX11-LABEL: name: sample_b_cl_2d
; GFX11: bb.1.main_body:
; GFX11-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
; GFX11-NEXT: {{ $}}
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX11-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX11-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX11-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX11-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX11-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX11-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX11-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX11-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX11-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX11-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX11-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX11-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX11-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX11-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX11-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX11-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX11-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX11-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX11-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[TRUNC2]](s16)
; GFX11-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC3]](s16), [[DEF]](s16)
; GFX11-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.b.cl.2d), 15, [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX11-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX11-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX11-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX11-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX12-LABEL: name: sample_b_cl_2d
; GFX12: bb.1.main_body:
; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
; GFX12-NEXT: {{ $}}
; GFX12-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX12-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX12-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX12-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX12-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX12-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX12-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX12-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX12-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX12-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX12-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX12-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX12-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX12-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX12-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX12-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX12-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX12-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX12-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX12-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX12-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX12-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX12-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX12-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX12-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[TRUNC2]](s16)
; GFX12-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC3]](s16), [[DEF]](s16)
; GFX12-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.b.cl.2d), 15, [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX12-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX12-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX12-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX12-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX12-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
main_body:
  %v = call <4 x float> @llvm.amdgcn.image.sample.b.cl.2d.v4f32.f16.f16(i32 15, half %bias, half %s, half %t, half %clamp, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
  ret <4 x float> %v
}

define amdgpu_ps <4 x float> @sample_c_b_cl_1d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, half %bias, float %zcompare, half %s, half %clamp) {
; GFX9-LABEL: name: sample_c_b_cl_1d
; GFX9: bb.1.main_body:
; GFX9-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
; GFX9-NEXT: {{ $}}
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX9-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX9-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX9-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX9-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX9-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX9-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX9-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX9-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX9-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX9-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX9-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX9-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX9-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY13]](s32)
; GFX9-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[TRUNC2]](s16)
; GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<6 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR2]](<2 x s16>), [[BITCAST]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>)
; GFX9-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.b.cl.1d), 15, [[CONCAT_VECTORS]](<6 x s16>), $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX9-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX9-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX9-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX9-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX10-LABEL: name: sample_c_b_cl_1d
; GFX10: bb.1.main_body:
; GFX10-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX10-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX10-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX10-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX10-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX10-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX10-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX10-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX10-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX10-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX10-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY13]](s32)
; GFX10-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[TRUNC2]](s16)
; GFX10-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.b.cl.1d), 15, [[BUILD_VECTOR2]](<2 x s16>), [[BITCAST]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX10-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX10-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX10-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX11-LABEL: name: sample_c_b_cl_1d
; GFX11: bb.1.main_body:
; GFX11-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
; GFX11-NEXT: {{ $}}
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX11-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX11-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX11-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX11-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX11-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX11-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX11-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX11-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX11-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX11-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX11-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX11-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX11-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX11-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX11-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX11-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX11-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX11-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY13]](s32)
; GFX11-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[TRUNC2]](s16)
; GFX11-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.b.cl.1d), 15, [[BUILD_VECTOR2]](<2 x s16>), [[BITCAST]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX11-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX11-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX11-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX11-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX12-LABEL: name: sample_c_b_cl_1d
; GFX12: bb.1.main_body:
; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
; GFX12-NEXT: {{ $}}
; GFX12-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX12-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX12-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX12-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX12-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX12-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX12-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX12-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX12-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX12-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX12-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX12-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX12-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX12-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX12-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX12-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX12-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX12-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX12-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX12-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX12-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX12-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX12-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX12-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY13]](s32)
; GFX12-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[TRUNC2]](s16)
; GFX12-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.b.cl.1d), 15, [[BUILD_VECTOR2]](<2 x s16>), [[BITCAST]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX12-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX12-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX12-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX12-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX12-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
main_body:
  %v = call <4 x float> @llvm.amdgcn.image.sample.c.b.cl.1d.v4f32.f16.f16(i32 15, half %bias, float %zcompare, half %s, half %clamp, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
  ret <4 x float> %v
}

define amdgpu_ps <4 x float> @sample_c_b_cl_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, half %bias, float %zcompare, half %s, half %t, half %clamp) {
; GFX9-LABEL: name: sample_c_b_cl_2d
; GFX9: bb.1.main_body:
; GFX9-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
; GFX9-NEXT: {{ $}}
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX9-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX9-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX9-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX9-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX9-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX9-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX9-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX9-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX9-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX9-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX9-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX9-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX9-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
; GFX9-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX9-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY13]](s32)
; GFX9-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[TRUNC2]](s16)
; GFX9-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC3]](s16), [[DEF]](s16)
; GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR2]](<2 x s16>), [[BITCAST]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>)
; GFX9-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.b.cl.2d), 15, [[CONCAT_VECTORS]](<8 x s16>), $noreg, $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX9-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX9-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX9-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX9-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX10-LABEL: name: sample_c_b_cl_2d
; GFX10: bb.1.main_body:
; GFX10-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX10-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX10-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX10-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX10-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX10-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX10-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX10-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX10-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX10-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX10-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX10-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
; GFX10-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY13]](s32)
; GFX10-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[TRUNC2]](s16)
; GFX10-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC3]](s16), [[DEF]](s16)
; GFX10-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.b.cl.2d), 15, [[BUILD_VECTOR2]](<2 x s16>), [[BITCAST]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX10-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX10-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX10-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX11-LABEL: name: sample_c_b_cl_2d
; GFX11: bb.1.main_body:
; GFX11-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
; GFX11-NEXT: {{ $}}
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX11-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX11-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX11-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX11-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX11-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX11-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX11-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX11-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX11-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX11-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX11-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX11-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX11-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX11-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX11-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX11-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX11-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
; GFX11-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX11-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX11-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY13]](s32)
; GFX11-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[TRUNC2]](s16)
; GFX11-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC3]](s16), [[DEF]](s16)
; GFX11-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.b.cl.2d), 15, [[BUILD_VECTOR2]](<2 x s16>), [[BITCAST]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX11-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX11-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX11-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX11-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX12-LABEL: name: sample_c_b_cl_2d
; GFX12: bb.1.main_body:
; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
; GFX12-NEXT: {{ $}}
; GFX12-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX12-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX12-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX12-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX12-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX12-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX12-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX12-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX12-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX12-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX12-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX12-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX12-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX12-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX12-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX12-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX12-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX12-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX12-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX12-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX12-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX12-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX12-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
; GFX12-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX12-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX12-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY13]](s32)
; GFX12-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[TRUNC2]](s16)
; GFX12-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC3]](s16), [[DEF]](s16)
; GFX12-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.b.cl.2d), 15, [[BUILD_VECTOR2]](<2 x s16>), [[BITCAST]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX12-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX12-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX12-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX12-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX12-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
main_body:
  %v = call <4 x float> @llvm.amdgcn.image.sample.c.b.cl.2d.v4f32.f16.f16(i32 15, half %bias, float %zcompare, half %s, half %t, half %clamp, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
  ret <4 x float> %v
}

define amdgpu_ps <4 x float> @sample_d_1d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, half %dsdh, half %dsdv, half %s) {
; GFX9-LABEL: name: sample_d_1d
; GFX9: bb.1.main_body:
; GFX9-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
; GFX9-NEXT: {{ $}}
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX9-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX9-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX9-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX9-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX9-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX9-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX9-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX9-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX9-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX9-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX9-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX9-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[DEF]](s16)
; GFX9-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF]](s16)
; GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<6 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>)
; GFX9-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.d.1d), 15, [[CONCAT_VECTORS]](<6 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX9-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX9-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX9-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX9-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX10-LABEL: name: sample_d_1d
; GFX10: bb.1.main_body:
; GFX10-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX10-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX10-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX10-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX10-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX10-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX10-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX10-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX10-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX10-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX10-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[DEF]](s16)
; GFX10-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF]](s16)
; GFX10-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.d.1d), 15, [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX10-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX10-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX10-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX11-LABEL: name: sample_d_1d
; GFX11: bb.1.main_body:
; GFX11-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
; GFX11-NEXT: {{ $}}
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX11-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX11-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX11-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX11-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX11-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX11-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX11-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX11-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX11-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX11-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX11-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX11-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX11-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX11-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX11-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX11-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX11-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[DEF]](s16)
; GFX11-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF]](s16)
; GFX11-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.d.1d), 15, [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX11-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX11-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX11-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX11-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX12-LABEL: name: sample_d_1d
; GFX12: bb.1.main_body:
; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
; GFX12-NEXT: {{ $}}
; GFX12-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX12-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX12-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX12-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX12-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX12-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX12-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX12-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX12-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX12-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX12-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX12-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX12-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX12-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX12-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX12-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX12-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX12-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX12-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX12-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX12-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX12-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX12-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[DEF]](s16)
; GFX12-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF]](s16)
; GFX12-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.d.1d), 15, [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX12-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX12-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX12-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX12-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX12-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
main_body:
%v = call <4 x float> @llvm.amdgcn.image.sample.d.1d.v4f32.f16.f16(i32 15, half %dsdh, half %dsdv, half %s, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
ret <4 x float> %v
}

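; With six f16 operands (four derivatives plus two coordinates), the address
; packing diverges by target: GFX9 concatenates the three <2 x s16> pairs into
; a single <6 x s16> operand, while GFX10+ pass them as separate <2 x s16>
; registers.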
define amdgpu_ps <4 x float> @sample_d_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, half %dsdh, half %dtdh, half %dsdv, half %dtdv, half %s, half %t) {
; GFX9-LABEL: name: sample_d_2d
; GFX9: bb.1.main_body:
; GFX9-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
; GFX9-NEXT: {{ $}}
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX9-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX9-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX9-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX9-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX9-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX9-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX9-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX9-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX9-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX9-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX9-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX9-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX9-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX9-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
; GFX9-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr5
; GFX9-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[COPY17]](s32)
; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX9-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
; GFX9-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC4]](s16), [[TRUNC5]](s16)
; GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<6 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>)
; GFX9-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.d.2d), 15, [[CONCAT_VECTORS]](<6 x s16>), $noreg, $noreg, $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX9-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX9-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX9-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX9-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX10-LABEL: name: sample_d_2d
; GFX10: bb.1.main_body:
; GFX10-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX10-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX10-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX10-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX10-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX10-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX10-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX10-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX10-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX10-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX10-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX10-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX10-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
; GFX10-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr5
; GFX10-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[COPY17]](s32)
; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX10-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
; GFX10-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC4]](s16), [[TRUNC5]](s16)
; GFX10-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.d.2d), 15, [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX10-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX10-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX10-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX11-LABEL: name: sample_d_2d
; GFX11: bb.1.main_body:
; GFX11-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
; GFX11-NEXT: {{ $}}
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX11-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX11-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX11-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX11-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX11-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX11-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX11-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX11-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX11-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX11-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX11-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX11-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX11-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX11-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX11-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX11-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX11-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX11-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
; GFX11-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr5
; GFX11-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[COPY17]](s32)
; GFX11-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX11-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
; GFX11-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC4]](s16), [[TRUNC5]](s16)
; GFX11-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.d.2d), 15, [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX11-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX11-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX11-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX11-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX12-LABEL: name: sample_d_2d
; GFX12: bb.1.main_body:
; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
; GFX12-NEXT: {{ $}}
; GFX12-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX12-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX12-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX12-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX12-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX12-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX12-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX12-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX12-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX12-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX12-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX12-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX12-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX12-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX12-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX12-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX12-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX12-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX12-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX12-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX12-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX12-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX12-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX12-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
; GFX12-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr5
; GFX12-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[COPY17]](s32)
; GFX12-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX12-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
; GFX12-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC4]](s16), [[TRUNC5]](s16)
; GFX12-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.d.2d), 15, [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX12-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX12-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX12-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX12-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX12-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
main_body:
%v = call <4 x float> @llvm.amdgcn.image.sample.d.2d.v4f32.f16.f16(i32 15, half %dsdh, half %dtdh, half %dsdv, half %dtdv, half %s, half %t, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
ret <4 x float> %v
}

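; With nine f16 operands the targets diverge further: GFX9 and GFX10 merge all
; six <2 x s16> pairs into one <12 x s16> operand, GFX11 keeps the first four
; pairs separate and concatenates only the last two into a <4 x s16>, and GFX12
; folds the last three into a <6 x s16>.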
define amdgpu_ps <4 x float> @sample_d_3d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, half %dsdh, half %dtdh, half %drdh, half %dsdv, half %dtdv, half %drdv, half %s, half %t, half %r) {
; GFX9-LABEL: name: sample_d_3d
; GFX9: bb.1.main_body:
; GFX9-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8
; GFX9-NEXT: {{ $}}
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX9-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX9-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX9-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX9-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX9-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX9-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX9-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX9-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX9-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX9-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX9-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX9-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX9-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX9-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
; GFX9-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr5
; GFX9-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[COPY17]](s32)
; GFX9-NEXT: [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr6
; GFX9-NEXT: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[COPY18]](s32)
; GFX9-NEXT: [[COPY19:%[0-9]+]]:_(s32) = COPY $vgpr7
; GFX9-NEXT: [[TRUNC7:%[0-9]+]]:_(s16) = G_TRUNC [[COPY19]](s32)
; GFX9-NEXT: [[COPY20:%[0-9]+]]:_(s32) = COPY $vgpr8
; GFX9-NEXT: [[TRUNC8:%[0-9]+]]:_(s16) = G_TRUNC [[COPY20]](s32)
; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX9-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX9-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF]](s16)
; GFX9-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC3]](s16), [[TRUNC4]](s16)
; GFX9-NEXT: [[BUILD_VECTOR5:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC5]](s16), [[DEF]](s16)
; GFX9-NEXT: [[BUILD_VECTOR6:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC6]](s16), [[TRUNC7]](s16)
; GFX9-NEXT: [[BUILD_VECTOR7:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC8]](s16), [[DEF]](s16)
; GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<12 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), [[BUILD_VECTOR5]](<2 x s16>), [[BUILD_VECTOR6]](<2 x s16>), [[BUILD_VECTOR7]](<2 x s16>)
; GFX9-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.d.3d), 15, [[CONCAT_VECTORS]](<12 x s16>), $noreg, $noreg, $noreg, $noreg, $noreg, $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX9-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX9-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX9-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX9-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX10-LABEL: name: sample_d_3d
; GFX10: bb.1.main_body:
; GFX10-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX10-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX10-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX10-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX10-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX10-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX10-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX10-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX10-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX10-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX10-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX10-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX10-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
; GFX10-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr5
; GFX10-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[COPY17]](s32)
; GFX10-NEXT: [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr6
; GFX10-NEXT: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[COPY18]](s32)
; GFX10-NEXT: [[COPY19:%[0-9]+]]:_(s32) = COPY $vgpr7
; GFX10-NEXT: [[TRUNC7:%[0-9]+]]:_(s16) = G_TRUNC [[COPY19]](s32)
; GFX10-NEXT: [[COPY20:%[0-9]+]]:_(s32) = COPY $vgpr8
; GFX10-NEXT: [[TRUNC8:%[0-9]+]]:_(s16) = G_TRUNC [[COPY20]](s32)
; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX10-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX10-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF]](s16)
; GFX10-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC3]](s16), [[TRUNC4]](s16)
; GFX10-NEXT: [[BUILD_VECTOR5:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC5]](s16), [[DEF]](s16)
; GFX10-NEXT: [[BUILD_VECTOR6:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC6]](s16), [[TRUNC7]](s16)
; GFX10-NEXT: [[BUILD_VECTOR7:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC8]](s16), [[DEF]](s16)
; GFX10-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<12 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), [[BUILD_VECTOR5]](<2 x s16>), [[BUILD_VECTOR6]](<2 x s16>), [[BUILD_VECTOR7]](<2 x s16>)
; GFX10-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.d.3d), 15, [[CONCAT_VECTORS]](<12 x s16>), $noreg, $noreg, $noreg, $noreg, $noreg, $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX10-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX10-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX10-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX11-LABEL: name: sample_d_3d
; GFX11: bb.1.main_body:
; GFX11-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8
; GFX11-NEXT: {{ $}}
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX11-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX11-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX11-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX11-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX11-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX11-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX11-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX11-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX11-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX11-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX11-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX11-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX11-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX11-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX11-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX11-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX11-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX11-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
; GFX11-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr5
; GFX11-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[COPY17]](s32)
; GFX11-NEXT: [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr6
; GFX11-NEXT: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[COPY18]](s32)
; GFX11-NEXT: [[COPY19:%[0-9]+]]:_(s32) = COPY $vgpr7
; GFX11-NEXT: [[TRUNC7:%[0-9]+]]:_(s16) = G_TRUNC [[COPY19]](s32)
; GFX11-NEXT: [[COPY20:%[0-9]+]]:_(s32) = COPY $vgpr8
; GFX11-NEXT: [[TRUNC8:%[0-9]+]]:_(s16) = G_TRUNC [[COPY20]](s32)
; GFX11-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX11-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX11-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF]](s16)
; GFX11-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC3]](s16), [[TRUNC4]](s16)
; GFX11-NEXT: [[BUILD_VECTOR5:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC5]](s16), [[DEF]](s16)
; GFX11-NEXT: [[BUILD_VECTOR6:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC6]](s16), [[TRUNC7]](s16)
; GFX11-NEXT: [[BUILD_VECTOR7:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC8]](s16), [[DEF]](s16)
; GFX11-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR6]](<2 x s16>), [[BUILD_VECTOR7]](<2 x s16>)
; GFX11-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.d.3d), 15, [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), [[BUILD_VECTOR5]](<2 x s16>), [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX11-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX11-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX11-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX11-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX12-LABEL: name: sample_d_3d
; GFX12: bb.1.main_body:
; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8
; GFX12-NEXT: {{ $}}
; GFX12-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX12-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX12-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX12-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX12-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX12-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX12-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX12-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX12-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX12-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX12-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX12-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX12-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX12-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX12-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX12-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX12-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX12-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX12-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX12-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX12-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX12-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX12-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX12-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
; GFX12-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr5
; GFX12-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[COPY17]](s32)
; GFX12-NEXT: [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr6
; GFX12-NEXT: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[COPY18]](s32)
; GFX12-NEXT: [[COPY19:%[0-9]+]]:_(s32) = COPY $vgpr7
; GFX12-NEXT: [[TRUNC7:%[0-9]+]]:_(s16) = G_TRUNC [[COPY19]](s32)
; GFX12-NEXT: [[COPY20:%[0-9]+]]:_(s32) = COPY $vgpr8
; GFX12-NEXT: [[TRUNC8:%[0-9]+]]:_(s16) = G_TRUNC [[COPY20]](s32)
; GFX12-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX12-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX12-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF]](s16)
; GFX12-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC3]](s16), [[TRUNC4]](s16)
; GFX12-NEXT: [[BUILD_VECTOR5:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC5]](s16), [[DEF]](s16)
; GFX12-NEXT: [[BUILD_VECTOR6:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC6]](s16), [[TRUNC7]](s16)
; GFX12-NEXT: [[BUILD_VECTOR7:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC8]](s16), [[DEF]](s16)
; GFX12-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<6 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR5]](<2 x s16>), [[BUILD_VECTOR6]](<2 x s16>), [[BUILD_VECTOR7]](<2 x s16>)
; GFX12-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.d.3d), 15, [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), [[CONCAT_VECTORS]](<6 x s16>), $noreg, $noreg, $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX12-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX12-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX12-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX12-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX12-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
main_body:
%v = call <4 x float> @llvm.amdgcn.image.sample.d.3d.v4f32.f16.f16(i32 15, half %dsdh, half %dtdh, half %drdh, half %dsdv, half %dtdv, half %drdv, half %s, half %t, half %r, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
ret <4 x float> %v
}

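; The f32 depth-compare value is not truncated: it is bitcast to <2 x s16> and
; passed ahead of the packed f16 derivatives and coordinate, and GFX9
; concatenates all four pairs into a single <8 x s16> operand.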
define amdgpu_ps <4 x float> @sample_c_d_1d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %zcompare, half %dsdh, half %dsdv, half %s) {
; GFX9-LABEL: name: sample_c_d_1d
; GFX9: bb.1.main_body:
; GFX9-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
; GFX9-NEXT: {{ $}}
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX9-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX9-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX9-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX9-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX9-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX9-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX9-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX9-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX9-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX9-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX9-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX9-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX9-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[DEF]](s16)
; GFX9-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF]](s16)
; GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>)
; GFX9-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.d.1d), 15, [[CONCAT_VECTORS]](<8 x s16>), $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX9-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX9-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX9-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX9-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX10-LABEL: name: sample_c_d_1d
; GFX10: bb.1.main_body:
; GFX10-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX10-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX10-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX10-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX10-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX10-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX10-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX10-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX10-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX10-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX10-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX10-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[DEF]](s16)
; GFX10-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF]](s16)
; GFX10-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.d.1d), 15, [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX10-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX10-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX10-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX11-LABEL: name: sample_c_d_1d
; GFX11: bb.1.main_body:
; GFX11-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
; GFX11-NEXT: {{ $}}
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX11-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX11-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX11-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX11-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX11-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX11-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX11-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX11-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX11-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX11-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX11-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX11-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX11-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX11-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX11-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX11-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX11-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX11-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX11-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[DEF]](s16)
; GFX11-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF]](s16)
; GFX11-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.d.1d), 15, [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX11-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX11-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX11-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX11-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX12-LABEL: name: sample_c_d_1d
; GFX12: bb.1.main_body:
; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
; GFX12-NEXT: {{ $}}
; GFX12-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX12-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX12-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX12-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX12-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX12-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX12-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX12-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX12-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX12-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX12-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX12-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX12-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX12-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX12-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX12-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX12-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX12-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX12-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX12-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX12-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX12-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX12-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX12-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX12-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[DEF]](s16)
; GFX12-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF]](s16)
; GFX12-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.d.1d), 15, [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX12-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX12-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX12-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX12-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX12-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
main_body:
%v = call <4 x float> @llvm.amdgcn.image.sample.c.d.1d.v4f32.f32.f16(i32 15, float %zcompare, half %dsdh, half %dsdv, half %s, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
ret <4 x float> %v
}

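; Same depth-compare handling with 2D gradients: the zcompare bitcast plus
; three fully packed <2 x s16> pairs, which GFX9 again merges into a single
; <8 x s16> operand.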
define amdgpu_ps <4 x float> @sample_c_d_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %zcompare, half %dsdh, half %dtdh, half %dsdv, half %dtdv, half %s, half %t) {
; GFX9-LABEL: name: sample_c_d_2d
; GFX9: bb.1.main_body:
; GFX9-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6
; GFX9-NEXT: {{ $}}
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX9-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX9-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX9-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX9-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX9-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX9-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX9-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX9-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX9-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX9-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX9-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX9-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
; GFX9-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr5
; GFX9-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY17]](s32)
; GFX9-NEXT: [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr6
; GFX9-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[COPY18]](s32)
; GFX9-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX9-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
; GFX9-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC4]](s16), [[TRUNC5]](s16)
; GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>)
; GFX9-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.d.2d), 15, [[CONCAT_VECTORS]](<8 x s16>), $noreg, $noreg, $noreg, $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX9-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX9-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX9-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX9-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX10-LABEL: name: sample_c_d_2d
; GFX10: bb.1.main_body:
; GFX10-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX10-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX10-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX10-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX10-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX10-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX10-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX10-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX10-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX10-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX10-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX10-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
; GFX10-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr5
; GFX10-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY17]](s32)
; GFX10-NEXT: [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr6
; GFX10-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[COPY18]](s32)
; GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX10-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
; GFX10-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC4]](s16), [[TRUNC5]](s16)
; GFX10-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.d.2d), 15, [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX10-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX10-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX10-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
3674 ; GFX11-LABEL: name: sample_c_d_2d
3675 ; GFX11: bb.1.main_body:
3676 ; GFX11-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6
3677 ; GFX11-NEXT: {{ $}}
3678 ; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
3679 ; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
3680 ; GFX11-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
3681 ; GFX11-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
3682 ; GFX11-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
3683 ; GFX11-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
3684 ; GFX11-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
3685 ; GFX11-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
3686 ; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
3687 ; GFX11-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
3688 ; GFX11-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
3689 ; GFX11-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
3690 ; GFX11-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
3691 ; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
3692 ; GFX11-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
3693 ; GFX11-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
3694 ; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
3695 ; GFX11-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
3696 ; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
3697 ; GFX11-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
3698 ; GFX11-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
3699 ; GFX11-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
3700 ; GFX11-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
3701 ; GFX11-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr5
3702 ; GFX11-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY17]](s32)
3703 ; GFX11-NEXT: [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr6
3704 ; GFX11-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[COPY18]](s32)
3705 ; GFX11-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
3706 ; GFX11-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
3707 ; GFX11-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
3708 ; GFX11-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC4]](s16), [[TRUNC5]](s16)
3709 ; GFX11-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.d.2d), 15, [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
3710 ; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
3711 ; GFX11-NEXT: $vgpr0 = COPY [[UV]](s32)
3712 ; GFX11-NEXT: $vgpr1 = COPY [[UV1]](s32)
3713 ; GFX11-NEXT: $vgpr2 = COPY [[UV2]](s32)
3714 ; GFX11-NEXT: $vgpr3 = COPY [[UV3]](s32)
3715 ; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
3717 ; GFX12-LABEL: name: sample_c_d_2d
3718 ; GFX12: bb.1.main_body:
3719 ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6
3720 ; GFX12-NEXT: {{ $}}
3721 ; GFX12-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
3722 ; GFX12-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
3723 ; GFX12-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
3724 ; GFX12-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
3725 ; GFX12-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
3726 ; GFX12-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
3727 ; GFX12-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
3728 ; GFX12-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
3729 ; GFX12-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
3730 ; GFX12-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
3731 ; GFX12-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
3732 ; GFX12-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
3733 ; GFX12-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
3734 ; GFX12-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
3735 ; GFX12-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
3736 ; GFX12-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
3737 ; GFX12-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
3738 ; GFX12-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
3739 ; GFX12-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
3740 ; GFX12-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
3741 ; GFX12-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
3742 ; GFX12-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
3743 ; GFX12-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
3744 ; GFX12-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr5
3745 ; GFX12-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY17]](s32)
3746 ; GFX12-NEXT: [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr6
3747 ; GFX12-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[COPY18]](s32)
3748 ; GFX12-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
3749 ; GFX12-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
3750 ; GFX12-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
3751 ; GFX12-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC4]](s16), [[TRUNC5]](s16)
3752 ; GFX12-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.d.2d), 15, [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
3753 ; GFX12-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
3754 ; GFX12-NEXT: $vgpr0 = COPY [[UV]](s32)
3755 ; GFX12-NEXT: $vgpr1 = COPY [[UV1]](s32)
3756 ; GFX12-NEXT: $vgpr2 = COPY [[UV2]](s32)
3757 ; GFX12-NEXT: $vgpr3 = COPY [[UV3]](s32)
3758 ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
3760 %v = call <4 x float> @llvm.amdgcn.image.sample.c.d.2d.v4f32.f32.f16(i32 15, float %zcompare, half %dsdh, half %dtdh, half %dsdv, half %dtdv, half %s, half %t, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
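; 1D gradients with A16: the legalizer pads each half operand pair out to a
; <2 x s16> register, using G_IMPLICIT_DEF for the unused lane of %dsdh and
; %dsdv; GFX9 then concatenates the registers into a single <6 x s16> address
; vector, while GFX10 and later pass them as separate address operands.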
define amdgpu_ps <4 x float> @sample_d_cl_1d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, half %dsdh, half %dsdv, half %s, half %clamp) {
; GFX9-LABEL: name: sample_d_cl_1d
; GFX9: bb.1.main_body:
; GFX9-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
; GFX9-NEXT: {{ $}}
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX9-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX9-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX9-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX9-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX9-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX9-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX9-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX9-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX9-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX9-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX9-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX9-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX9-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX9-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[DEF]](s16)
; GFX9-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
; GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<6 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>)
; GFX9-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.d.cl.1d), 15, [[CONCAT_VECTORS]](<6 x s16>), $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX9-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX9-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX9-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX9-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX10-LABEL: name: sample_d_cl_1d
; GFX10: bb.1.main_body:
; GFX10-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX10-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX10-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX10-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX10-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX10-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX10-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX10-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX10-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX10-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX10-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX10-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX10-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[DEF]](s16)
; GFX10-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
; GFX10-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.d.cl.1d), 15, [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX10-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX10-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX10-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX11-LABEL: name: sample_d_cl_1d
; GFX11: bb.1.main_body:
; GFX11-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
; GFX11-NEXT: {{ $}}
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX11-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX11-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX11-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX11-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX11-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX11-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX11-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX11-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX11-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX11-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX11-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX11-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX11-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX11-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX11-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX11-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX11-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX11-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX11-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[DEF]](s16)
; GFX11-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
; GFX11-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.d.cl.1d), 15, [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX11-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX11-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX11-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX11-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX12-LABEL: name: sample_d_cl_1d
; GFX12: bb.1.main_body:
; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
; GFX12-NEXT: {{ $}}
; GFX12-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX12-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX12-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX12-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX12-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX12-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX12-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX12-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX12-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX12-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX12-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX12-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX12-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX12-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX12-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX12-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX12-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX12-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX12-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX12-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX12-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX12-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX12-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX12-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX12-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[DEF]](s16)
; GFX12-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
; GFX12-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.d.cl.1d), 15, [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX12-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX12-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX12-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX12-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX12-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
main_body:
  %v = call <4 x float> @llvm.amdgcn.image.sample.d.cl.1d.v4f32.f16.f16(i32 15, half %dsdh, half %dsdv, half %s, half %clamp, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
  ret <4 x float> %v
}
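; 2D gradients: three fully-used <2 x s16> pairs plus a trailing pair whose
; second lane is G_IMPLICIT_DEF padding for the lone %clamp operand.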
define amdgpu_ps <4 x float> @sample_d_cl_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, half %dsdh, half %dtdh, half %dsdv, half %dtdv, half %s, half %t, half %clamp) {
; GFX9-LABEL: name: sample_d_cl_2d
; GFX9: bb.1.main_body:
; GFX9-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6
; GFX9-NEXT: {{ $}}
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX9-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX9-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX9-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX9-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX9-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX9-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX9-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX9-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX9-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX9-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX9-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX9-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX9-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX9-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
; GFX9-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr5
; GFX9-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[COPY17]](s32)
; GFX9-NEXT: [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr6
; GFX9-NEXT: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[COPY18]](s32)
; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX9-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
; GFX9-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC4]](s16), [[TRUNC5]](s16)
; GFX9-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX9-NEXT: [[BUILD_VECTOR5:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC6]](s16), [[DEF]](s16)
; GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), [[BUILD_VECTOR5]](<2 x s16>)
; GFX9-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.d.cl.2d), 15, [[CONCAT_VECTORS]](<8 x s16>), $noreg, $noreg, $noreg, $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX9-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX9-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX9-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX9-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX10-LABEL: name: sample_d_cl_2d
; GFX10: bb.1.main_body:
; GFX10-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX10-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX10-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX10-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX10-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX10-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX10-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX10-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX10-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX10-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX10-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX10-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX10-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
; GFX10-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr5
; GFX10-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[COPY17]](s32)
; GFX10-NEXT: [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr6
; GFX10-NEXT: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[COPY18]](s32)
; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX10-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
; GFX10-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC4]](s16), [[TRUNC5]](s16)
; GFX10-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX10-NEXT: [[BUILD_VECTOR5:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC6]](s16), [[DEF]](s16)
; GFX10-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.d.cl.2d), 15, [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), [[BUILD_VECTOR5]](<2 x s16>), $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX10-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX10-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX10-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX11-LABEL: name: sample_d_cl_2d
; GFX11: bb.1.main_body:
; GFX11-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6
; GFX11-NEXT: {{ $}}
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX11-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX11-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX11-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX11-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX11-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX11-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX11-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX11-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX11-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX11-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX11-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX11-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX11-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX11-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX11-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX11-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX11-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX11-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
; GFX11-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr5
; GFX11-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[COPY17]](s32)
; GFX11-NEXT: [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr6
; GFX11-NEXT: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[COPY18]](s32)
; GFX11-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX11-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
; GFX11-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC4]](s16), [[TRUNC5]](s16)
; GFX11-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX11-NEXT: [[BUILD_VECTOR5:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC6]](s16), [[DEF]](s16)
; GFX11-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.d.cl.2d), 15, [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), [[BUILD_VECTOR5]](<2 x s16>), $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX11-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX11-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX11-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX11-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX12-LABEL: name: sample_d_cl_2d
; GFX12: bb.1.main_body:
; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6
; GFX12-NEXT: {{ $}}
; GFX12-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX12-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX12-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX12-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX12-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX12-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX12-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX12-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX12-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX12-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX12-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX12-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX12-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX12-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX12-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX12-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX12-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX12-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX12-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX12-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX12-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX12-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX12-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX12-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
; GFX12-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr5
; GFX12-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[COPY17]](s32)
; GFX12-NEXT: [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr6
; GFX12-NEXT: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[COPY18]](s32)
; GFX12-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX12-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
; GFX12-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC4]](s16), [[TRUNC5]](s16)
; GFX12-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX12-NEXT: [[BUILD_VECTOR5:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC6]](s16), [[DEF]](s16)
; GFX12-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.d.cl.2d), 15, [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), [[BUILD_VECTOR5]](<2 x s16>), $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX12-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX12-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX12-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX12-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX12-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
main_body:
  %v = call <4 x float> @llvm.amdgcn.image.sample.d.cl.2d.v4f32.f16.f16(i32 15, half %dsdh, half %dtdh, half %dsdv, half %dtdv, half %s, half %t, half %clamp, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
  ret <4 x float> %v
}
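; With a compare value, the f32 %zcompare stays 32-bit and is bitcast to
; <2 x s16> before being prepended to the packed half operands.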
define amdgpu_ps <4 x float> @sample_c_d_cl_1d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %zcompare, half %dsdh, half %dsdv, half %s, half %clamp) {
; GFX9-LABEL: name: sample_c_d_cl_1d
; GFX9: bb.1.main_body:
; GFX9-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
; GFX9-NEXT: {{ $}}
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX9-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX9-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX9-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX9-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX9-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX9-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX9-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX9-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX9-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX9-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX9-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX9-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
; GFX9-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX9-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX9-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[DEF]](s16)
; GFX9-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
; GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>)
; GFX9-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.d.cl.1d), 15, [[CONCAT_VECTORS]](<8 x s16>), $noreg, $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX9-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX9-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX9-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX9-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX10-LABEL: name: sample_c_d_cl_1d
; GFX10: bb.1.main_body:
; GFX10-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX10-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX10-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX10-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX10-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX10-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX10-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX10-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX10-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX10-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX10-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX10-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
; GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX10-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX10-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[DEF]](s16)
; GFX10-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
; GFX10-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.d.cl.1d), 15, [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX10-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX10-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX10-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX11-LABEL: name: sample_c_d_cl_1d
; GFX11: bb.1.main_body:
; GFX11-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
; GFX11-NEXT: {{ $}}
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX11-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX11-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX11-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX11-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX11-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX11-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX11-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX11-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX11-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX11-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX11-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX11-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX11-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX11-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX11-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX11-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX11-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
; GFX11-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX11-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX11-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX11-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[DEF]](s16)
; GFX11-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
; GFX11-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.d.cl.1d), 15, [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX11-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX11-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX11-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX11-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX12-LABEL: name: sample_c_d_cl_1d
; GFX12: bb.1.main_body:
; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
; GFX12-NEXT: {{ $}}
; GFX12-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX12-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX12-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX12-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX12-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX12-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX12-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX12-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX12-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX12-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX12-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX12-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX12-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX12-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX12-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX12-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX12-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX12-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX12-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX12-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX12-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX12-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX12-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
; GFX12-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX12-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX12-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX12-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[DEF]](s16)
; GFX12-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
; GFX12-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.d.cl.1d), 15, [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX12-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX12-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX12-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX12-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX12-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
main_body:
  %v = call <4 x float> @llvm.amdgcn.image.sample.c.d.cl.1d.v4f32.f32.f16(i32 15, float %zcompare, half %dsdh, half %dsdv, half %s, half %clamp, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
  ret <4 x float> %v
}
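; Compare + 2D gradients + clamp: GFX9 concatenates everything into a
; <10 x s16> address vector; GFX10 and later keep five separate registers.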
4274 define amdgpu_ps <4 x float> @sample_c_d_cl_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %zcompare, half %dsdh, half %dtdh, half %dsdv, half %dtdv, half %s, half %t, half %clamp) {
4275 ; GFX9-LABEL: name: sample_c_d_cl_2d
4276 ; GFX9: bb.1.main_body:
4277 ; GFX9-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7
4279 ; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
4280 ; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
4281 ; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
4282 ; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
4283 ; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
4284 ; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
4285 ; GFX9-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
4286 ; GFX9-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
4287 ; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
4288 ; GFX9-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
4289 ; GFX9-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
4290 ; GFX9-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
4291 ; GFX9-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
4292 ; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
4293 ; GFX9-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
4294 ; GFX9-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
4295 ; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
4296 ; GFX9-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
4297 ; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
4298 ; GFX9-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
4299 ; GFX9-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
4300 ; GFX9-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
4301 ; GFX9-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
4302 ; GFX9-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr5
4303 ; GFX9-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY17]](s32)
4304 ; GFX9-NEXT: [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr6
4305 ; GFX9-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[COPY18]](s32)
4306 ; GFX9-NEXT: [[COPY19:%[0-9]+]]:_(s32) = COPY $vgpr7
4307 ; GFX9-NEXT: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[COPY19]](s32)
4308 ; GFX9-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
4309 ; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
4310 ; GFX9-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
4311 ; GFX9-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC4]](s16), [[TRUNC5]](s16)
4312 ; GFX9-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
4313 ; GFX9-NEXT: [[BUILD_VECTOR5:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC6]](s16), [[DEF]](s16)
4314 ; GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<10 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), [[BUILD_VECTOR5]](<2 x s16>)
4315 ; GFX9-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.d.cl.2d), 15, [[CONCAT_VECTORS]](<10 x s16>), $noreg, $noreg, $noreg, $noreg, $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
4316 ; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
4317 ; GFX9-NEXT: $vgpr0 = COPY [[UV]](s32)
4318 ; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
4319 ; GFX9-NEXT: $vgpr2 = COPY [[UV2]](s32)
4320 ; GFX9-NEXT: $vgpr3 = COPY [[UV3]](s32)
4321 ; GFX9-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
4323 ; GFX10-LABEL: name: sample_c_d_cl_2d
4324 ; GFX10: bb.1.main_body:
4325 ; GFX10-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7
4326 ; GFX10-NEXT: {{ $}}
4327 ; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
4328 ; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
4329 ; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
4330 ; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
4331 ; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
4332 ; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
4333 ; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
4334 ; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
4335 ; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
4336 ; GFX10-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
4337 ; GFX10-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
4338 ; GFX10-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
4339 ; GFX10-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
4340 ; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
4341 ; GFX10-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
4342 ; GFX10-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
4343 ; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
4344 ; GFX10-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
4345 ; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
4346 ; GFX10-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
4347 ; GFX10-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
4348 ; GFX10-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
4349 ; GFX10-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
4350 ; GFX10-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr5
4351 ; GFX10-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY17]](s32)
4352 ; GFX10-NEXT: [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr6
4353 ; GFX10-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[COPY18]](s32)
4354 ; GFX10-NEXT: [[COPY19:%[0-9]+]]:_(s32) = COPY $vgpr7
4355 ; GFX10-NEXT: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[COPY19]](s32)
4356 ; GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
4357 ; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
4358 ; GFX10-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
4359 ; GFX10-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC4]](s16), [[TRUNC5]](s16)
4360 ; GFX10-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
4361 ; GFX10-NEXT: [[BUILD_VECTOR5:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC6]](s16), [[DEF]](s16)
4362 ; GFX10-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.d.cl.2d), 15, [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), [[BUILD_VECTOR5]](<2 x s16>), $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
4363 ; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
4364 ; GFX10-NEXT: $vgpr0 = COPY [[UV]](s32)
4365 ; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
4366 ; GFX10-NEXT: $vgpr2 = COPY [[UV2]](s32)
4367 ; GFX10-NEXT: $vgpr3 = COPY [[UV3]](s32)
4368 ; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX11-LABEL: name: sample_c_d_cl_2d
; GFX11: bb.1.main_body:
; GFX11-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7
; GFX11-NEXT: {{ $}}
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX11-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX11-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX11-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX11-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX11-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX11-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX11-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX11-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX11-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX11-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX11-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX11-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX11-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX11-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX11-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX11-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX11-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
; GFX11-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr5
; GFX11-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY17]](s32)
; GFX11-NEXT: [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr6
; GFX11-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[COPY18]](s32)
; GFX11-NEXT: [[COPY19:%[0-9]+]]:_(s32) = COPY $vgpr7
; GFX11-NEXT: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[COPY19]](s32)
; GFX11-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX11-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX11-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
; GFX11-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC4]](s16), [[TRUNC5]](s16)
; GFX11-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX11-NEXT: [[BUILD_VECTOR5:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC6]](s16), [[DEF]](s16)
; GFX11-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.d.cl.2d), 15, [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), [[BUILD_VECTOR5]](<2 x s16>), $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX11-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX11-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX11-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX11-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX12-LABEL: name: sample_c_d_cl_2d
; GFX12: bb.1.main_body:
; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7
; GFX12-NEXT: {{ $}}
; GFX12-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX12-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX12-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX12-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX12-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX12-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX12-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX12-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX12-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX12-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX12-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX12-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX12-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX12-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX12-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX12-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX12-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX12-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX12-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX12-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX12-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX12-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX12-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
; GFX12-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr5
; GFX12-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY17]](s32)
; GFX12-NEXT: [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr6
; GFX12-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[COPY18]](s32)
; GFX12-NEXT: [[COPY19:%[0-9]+]]:_(s32) = COPY $vgpr7
; GFX12-NEXT: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[COPY19]](s32)
; GFX12-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX12-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX12-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
; GFX12-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC4]](s16), [[TRUNC5]](s16)
; GFX12-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX12-NEXT: [[BUILD_VECTOR5:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC6]](s16), [[DEF]](s16)
; GFX12-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR4]](<2 x s16>), [[BUILD_VECTOR5]](<2 x s16>)
; GFX12-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.d.cl.2d), 15, [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX12-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX12-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX12-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX12-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX12-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
main_body:
  %v = call <4 x float> @llvm.amdgcn.image.sample.c.d.cl.2d.v4f32.f32.f16(i32 15, float %zcompare, half %dsdh, half %dtdh, half %dsdv, half %dtdv, half %s, half %t, half %clamp, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
  ret <4 x float> %v
}

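; @llvm.amdgcn.image.sample.cd.1d with half gradients and coordinate: GFX9 packs
; the three padded s16 operands into one <6 x s16> concat-vector, while GFX10+
; pass three separate <2 x s16> registers.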
define amdgpu_ps <4 x float> @sample_cd_1d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, half %dsdh, half %dsdv, half %s) {
; GFX9-LABEL: name: sample_cd_1d
; GFX9: bb.1.main_body:
; GFX9-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
; GFX9-NEXT: {{ $}}
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX9-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX9-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX9-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX9-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX9-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX9-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX9-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX9-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX9-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX9-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX9-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX9-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[DEF]](s16)
; GFX9-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF]](s16)
; GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<6 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>)
; GFX9-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.cd.1d), 15, [[CONCAT_VECTORS]](<6 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX9-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX9-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX9-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX9-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX10-LABEL: name: sample_cd_1d
; GFX10: bb.1.main_body:
; GFX10-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX10-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX10-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX10-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX10-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX10-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX10-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX10-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX10-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX10-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX10-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[DEF]](s16)
; GFX10-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF]](s16)
; GFX10-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.cd.1d), 15, [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX10-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX10-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX10-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX11-LABEL: name: sample_cd_1d
; GFX11: bb.1.main_body:
; GFX11-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
; GFX11-NEXT: {{ $}}
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX11-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX11-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX11-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX11-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX11-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX11-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX11-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX11-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX11-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX11-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX11-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX11-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX11-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX11-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX11-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX11-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX11-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[DEF]](s16)
; GFX11-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF]](s16)
; GFX11-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.cd.1d), 15, [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX11-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX11-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX11-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX11-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX12-LABEL: name: sample_cd_1d
; GFX12: bb.1.main_body:
; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
; GFX12-NEXT: {{ $}}
; GFX12-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX12-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX12-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX12-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX12-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX12-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX12-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX12-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX12-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX12-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX12-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX12-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX12-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX12-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX12-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX12-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX12-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX12-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX12-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX12-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX12-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX12-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX12-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[DEF]](s16)
; GFX12-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF]](s16)
; GFX12-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.cd.1d), 15, [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX12-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX12-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX12-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX12-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX12-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
main_body:
  %v = call <4 x float> @llvm.amdgcn.image.sample.cd.1d.v4f32.f16.f16(i32 15, half %dsdh, half %dsdv, half %s, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
  ret <4 x float> %v
}

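; @llvm.amdgcn.image.sample.cd.2d with half gradients and coordinates: GFX9
; builds a <6 x s16> concat-vector from the three packed pairs; GFX10+ pass
; three <2 x s16> registers and pad the remaining operands with $noreg.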
define amdgpu_ps <4 x float> @sample_cd_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, half %dsdh, half %dtdh, half %dsdv, half %dtdv, half %s, half %t) {
; GFX9-LABEL: name: sample_cd_2d
; GFX9: bb.1.main_body:
; GFX9-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
; GFX9-NEXT: {{ $}}
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX9-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX9-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX9-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX9-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX9-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX9-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX9-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX9-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX9-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX9-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX9-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX9-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX9-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX9-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
; GFX9-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr5
; GFX9-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[COPY17]](s32)
; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX9-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
; GFX9-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC4]](s16), [[TRUNC5]](s16)
; GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<6 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>)
; GFX9-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.cd.2d), 15, [[CONCAT_VECTORS]](<6 x s16>), $noreg, $noreg, $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX9-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX9-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX9-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX9-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX10-LABEL: name: sample_cd_2d
; GFX10: bb.1.main_body:
; GFX10-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX10-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX10-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX10-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX10-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX10-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX10-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX10-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX10-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX10-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX10-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX10-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX10-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
; GFX10-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr5
; GFX10-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[COPY17]](s32)
; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX10-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
; GFX10-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC4]](s16), [[TRUNC5]](s16)
; GFX10-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.cd.2d), 15, [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX10-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX10-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX10-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX11-LABEL: name: sample_cd_2d
; GFX11: bb.1.main_body:
; GFX11-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
; GFX11-NEXT: {{ $}}
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX11-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX11-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX11-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX11-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX11-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX11-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX11-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX11-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX11-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX11-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX11-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX11-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX11-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX11-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX11-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX11-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX11-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX11-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
; GFX11-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr5
; GFX11-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[COPY17]](s32)
; GFX11-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX11-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
; GFX11-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC4]](s16), [[TRUNC5]](s16)
; GFX11-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.cd.2d), 15, [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX11-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX11-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX11-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX11-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX12-LABEL: name: sample_cd_2d
; GFX12: bb.1.main_body:
; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
; GFX12-NEXT: {{ $}}
; GFX12-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX12-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX12-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX12-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX12-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX12-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX12-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX12-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX12-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX12-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX12-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX12-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX12-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX12-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX12-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX12-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX12-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX12-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX12-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX12-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX12-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX12-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX12-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX12-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
; GFX12-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr5
; GFX12-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[COPY17]](s32)
; GFX12-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX12-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
; GFX12-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC4]](s16), [[TRUNC5]](s16)
; GFX12-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.cd.2d), 15, [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX12-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX12-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX12-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX12-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX12-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
main_body:
  %v = call <4 x float> @llvm.amdgcn.image.sample.cd.2d.v4f32.f16.f16(i32 15, half %dsdh, half %dtdh, half %dsdv, half %dtdv, half %s, half %t, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
  ret <4 x float> %v
}

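; @llvm.amdgcn.image.sample.c.cd.1d: the f32 depth-compare value is bitcast to
; <2 x s16> rather than truncated; GFX9 concatenates it with the padded half
; operands into an <8 x s16> vector, while GFX10+ pass it as its own register.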
define amdgpu_ps <4 x float> @sample_c_cd_1d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %zcompare, half %dsdh, half %dsdv, half %s) {
; GFX9-LABEL: name: sample_c_cd_1d
; GFX9: bb.1.main_body:
; GFX9-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
; GFX9-NEXT: {{ $}}
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX9-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX9-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX9-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX9-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX9-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX9-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX9-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX9-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX9-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX9-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX9-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX9-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX9-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[DEF]](s16)
; GFX9-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF]](s16)
; GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>)
; GFX9-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.cd.1d), 15, [[CONCAT_VECTORS]](<8 x s16>), $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX9-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX9-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX9-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX9-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX10-LABEL: name: sample_c_cd_1d
; GFX10: bb.1.main_body:
; GFX10-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX10-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX10-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX10-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX10-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX10-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX10-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX10-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX10-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX10-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX10-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX10-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[DEF]](s16)
; GFX10-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF]](s16)
; GFX10-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.cd.1d), 15, [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX10-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX10-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX10-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX11-LABEL: name: sample_c_cd_1d
; GFX11: bb.1.main_body:
; GFX11-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
; GFX11-NEXT: {{ $}}
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX11-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX11-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX11-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX11-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX11-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX11-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX11-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX11-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX11-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX11-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX11-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX11-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX11-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX11-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX11-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX11-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX11-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX11-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX11-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[DEF]](s16)
; GFX11-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF]](s16)
; GFX11-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.cd.1d), 15, [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX11-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX11-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX11-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX11-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX12-LABEL: name: sample_c_cd_1d
; GFX12: bb.1.main_body:
; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
; GFX12-NEXT: {{ $}}
; GFX12-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX12-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX12-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX12-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX12-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX12-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX12-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX12-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX12-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX12-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX12-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX12-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX12-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX12-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX12-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX12-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX12-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX12-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX12-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX12-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX12-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX12-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX12-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX12-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX12-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[DEF]](s16)
; GFX12-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF]](s16)
; GFX12-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.cd.1d), 15, [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX12-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX12-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX12-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX12-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX12-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
main_body:
  %v = call <4 x float> @llvm.amdgcn.image.sample.c.cd.1d.v4f32.f32.f16(i32 15, float %zcompare, half %dsdh, half %dsdv, half %s, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
  ret <4 x float> %v
}

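; @llvm.amdgcn.image.sample.c.cd.2d: as for c.cd.1d, the bitcast zcompare leads
; the address operands; GFX9 concatenates it with the three packed half pairs
; into an <8 x s16> vector, while GFX10+ pass four <2 x s16> registers.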
define amdgpu_ps <4 x float> @sample_c_cd_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %zcompare, half %dsdh, half %dtdh, half %dsdv, half %dtdv, half %s, half %t) {
; GFX9-LABEL: name: sample_c_cd_2d
; GFX9: bb.1.main_body:
; GFX9-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6
; GFX9-NEXT: {{ $}}
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX9-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX9-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX9-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX9-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX9-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX9-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX9-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX9-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX9-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX9-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX9-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX9-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
; GFX9-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr5
; GFX9-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY17]](s32)
; GFX9-NEXT: [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr6
; GFX9-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[COPY18]](s32)
; GFX9-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX9-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
; GFX9-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC4]](s16), [[TRUNC5]](s16)
; GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>)
; GFX9-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.cd.2d), 15, [[CONCAT_VECTORS]](<8 x s16>), $noreg, $noreg, $noreg, $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX9-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX9-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX9-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX9-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX10-LABEL: name: sample_c_cd_2d
; GFX10: bb.1.main_body:
; GFX10-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX10-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX10-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX10-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX10-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX10-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX10-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX10-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX10-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX10-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX10-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX10-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
; GFX10-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr5
; GFX10-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY17]](s32)
; GFX10-NEXT: [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr6
; GFX10-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[COPY18]](s32)
; GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX10-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
; GFX10-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC4]](s16), [[TRUNC5]](s16)
; GFX10-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.cd.2d), 15, [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX10-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX10-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX10-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX11-LABEL: name: sample_c_cd_2d
; GFX11: bb.1.main_body:
; GFX11-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6
; GFX11-NEXT: {{ $}}
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX11-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX11-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX11-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX11-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX11-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX11-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX11-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX11-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX11-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX11-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX11-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX11-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX11-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX11-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX11-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX11-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX11-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
; GFX11-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr5
; GFX11-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY17]](s32)
; GFX11-NEXT: [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr6
; GFX11-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[COPY18]](s32)
; GFX11-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX11-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX11-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
; GFX11-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC4]](s16), [[TRUNC5]](s16)
; GFX11-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.cd.2d), 15, [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX11-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX11-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX11-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX11-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX12-LABEL: name: sample_c_cd_2d
; GFX12: bb.1.main_body:
; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6
; GFX12-NEXT: {{ $}}
; GFX12-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX12-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX12-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX12-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX12-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX12-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX12-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX12-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX12-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX12-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX12-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX12-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX12-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX12-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX12-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX12-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX12-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX12-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX12-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX12-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX12-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX12-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX12-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
; GFX12-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr5
; GFX12-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY17]](s32)
; GFX12-NEXT: [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr6
; GFX12-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[COPY18]](s32)
; GFX12-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX12-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX12-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
; GFX12-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC4]](s16), [[TRUNC5]](s16)
; GFX12-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.cd.2d), 15, [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX12-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX12-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX12-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX12-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX12-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
main_body:
%v = call <4 x float> @llvm.amdgcn.image.sample.c.cd.2d.v4f32.f32.f16(i32 15, float %zcompare, half %dsdh, half %dtdh, half %dsdv, half %dtdv, half %s, half %t, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
ret <4 x float> %v
}
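
; Derivative sample with LOD clamp, four f16 operands (dsdh, dsdv, s, clamp):
; GFX9 pads each gradient to <2 x s16> with an implicit_def and concatenates
; all operands into a single <6 x s16>; GFX10+ pass the <2 x s16> parts separately.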
define amdgpu_ps <4 x float> @sample_cd_cl_1d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, half %dsdh, half %dsdv, half %s, half %clamp) {
; GFX9-LABEL: name: sample_cd_cl_1d
; GFX9: bb.1.main_body:
; GFX9-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
; GFX9-NEXT: {{ $}}
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX9-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX9-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX9-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX9-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX9-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX9-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX9-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX9-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX9-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX9-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX9-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX9-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX9-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX9-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[DEF]](s16)
; GFX9-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
; GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<6 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>)
; GFX9-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.cd.cl.1d), 15, [[CONCAT_VECTORS]](<6 x s16>), $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX9-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX9-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX9-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX9-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX10-LABEL: name: sample_cd_cl_1d
; GFX10: bb.1.main_body:
; GFX10-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX10-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX10-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX10-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX10-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX10-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX10-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX10-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX10-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX10-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX10-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX10-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX10-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[DEF]](s16)
; GFX10-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
; GFX10-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.cd.cl.1d), 15, [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX10-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX10-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX10-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX11-LABEL: name: sample_cd_cl_1d
; GFX11: bb.1.main_body:
; GFX11-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
; GFX11-NEXT: {{ $}}
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX11-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX11-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX11-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX11-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX11-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX11-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX11-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX11-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX11-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX11-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX11-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX11-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX11-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX11-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX11-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX11-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX11-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX11-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX11-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[DEF]](s16)
; GFX11-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
; GFX11-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.cd.cl.1d), 15, [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX11-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX11-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX11-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX11-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX12-LABEL: name: sample_cd_cl_1d
; GFX12: bb.1.main_body:
; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
; GFX12-NEXT: {{ $}}
; GFX12-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX12-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX12-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX12-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX12-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX12-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX12-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX12-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX12-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX12-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX12-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX12-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX12-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX12-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX12-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX12-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX12-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX12-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX12-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX12-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX12-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX12-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX12-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX12-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX12-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[DEF]](s16)
; GFX12-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
; GFX12-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.cd.cl.1d), 15, [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX12-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX12-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX12-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX12-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX12-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
main_body:
%v = call <4 x float> @llvm.amdgcn.image.sample.cd.cl.1d.v4f32.f16.f16(i32 15, half %dsdh, half %dsdv, half %s, half %clamp, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
ret <4 x float> %v
}
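
; The 2D variant carries seven f16 operands; the odd count leaves the final
; <2 x s16> (clamp) padded with an implicit_def. GFX9 concatenates everything
; into <8 x s16>; GFX10+ pass four <2 x s16> registers plus $noreg placeholders.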
define amdgpu_ps <4 x float> @sample_cd_cl_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, half %dsdh, half %dtdh, half %dsdv, half %dtdv, half %s, half %t, half %clamp) {
; GFX9-LABEL: name: sample_cd_cl_2d
; GFX9: bb.1.main_body:
; GFX9-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6
; GFX9-NEXT: {{ $}}
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX9-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX9-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX9-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX9-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX9-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX9-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX9-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX9-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX9-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX9-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX9-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX9-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX9-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX9-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
; GFX9-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr5
; GFX9-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[COPY17]](s32)
; GFX9-NEXT: [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr6
; GFX9-NEXT: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[COPY18]](s32)
; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX9-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
; GFX9-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC4]](s16), [[TRUNC5]](s16)
; GFX9-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX9-NEXT: [[BUILD_VECTOR5:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC6]](s16), [[DEF]](s16)
; GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), [[BUILD_VECTOR5]](<2 x s16>)
; GFX9-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.cd.cl.2d), 15, [[CONCAT_VECTORS]](<8 x s16>), $noreg, $noreg, $noreg, $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX9-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX9-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX9-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX9-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX10-LABEL: name: sample_cd_cl_2d
; GFX10: bb.1.main_body:
; GFX10-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX10-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX10-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX10-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX10-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX10-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX10-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX10-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX10-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX10-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX10-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX10-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX10-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
; GFX10-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr5
; GFX10-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[COPY17]](s32)
; GFX10-NEXT: [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr6
; GFX10-NEXT: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[COPY18]](s32)
; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX10-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
; GFX10-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC4]](s16), [[TRUNC5]](s16)
; GFX10-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX10-NEXT: [[BUILD_VECTOR5:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC6]](s16), [[DEF]](s16)
; GFX10-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.cd.cl.2d), 15, [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), [[BUILD_VECTOR5]](<2 x s16>), $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX10-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX10-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX10-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX11-LABEL: name: sample_cd_cl_2d
; GFX11: bb.1.main_body:
; GFX11-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6
; GFX11-NEXT: {{ $}}
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX11-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX11-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX11-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX11-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX11-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX11-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX11-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX11-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX11-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX11-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX11-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX11-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX11-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX11-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX11-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX11-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX11-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX11-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
; GFX11-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr5
; GFX11-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[COPY17]](s32)
; GFX11-NEXT: [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr6
; GFX11-NEXT: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[COPY18]](s32)
; GFX11-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX11-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
; GFX11-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC4]](s16), [[TRUNC5]](s16)
; GFX11-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX11-NEXT: [[BUILD_VECTOR5:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC6]](s16), [[DEF]](s16)
; GFX11-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.cd.cl.2d), 15, [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), [[BUILD_VECTOR5]](<2 x s16>), $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX11-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX11-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX11-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX11-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX12-LABEL: name: sample_cd_cl_2d
; GFX12: bb.1.main_body:
; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6
; GFX12-NEXT: {{ $}}
; GFX12-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX12-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX12-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX12-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX12-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX12-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX12-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX12-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX12-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX12-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX12-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX12-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX12-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX12-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX12-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX12-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX12-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX12-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX12-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX12-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX12-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX12-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX12-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX12-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
; GFX12-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr5
; GFX12-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[COPY17]](s32)
; GFX12-NEXT: [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr6
; GFX12-NEXT: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[COPY18]](s32)
; GFX12-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX12-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
; GFX12-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC4]](s16), [[TRUNC5]](s16)
; GFX12-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX12-NEXT: [[BUILD_VECTOR5:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC6]](s16), [[DEF]](s16)
; GFX12-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.cd.cl.2d), 15, [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), [[BUILD_VECTOR5]](<2 x s16>), $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX12-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX12-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX12-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX12-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX12-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
main_body:
%v = call <4 x float> @llvm.amdgcn.image.sample.cd.cl.2d.v4f32.f16.f16(i32 15, half %dsdh, half %dtdh, half %dsdv, half %dtdv, half %s, half %t, half %clamp, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
ret <4 x float> %v
}
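
; Adding a compare value: the f32 zcompare is bitcast to <2 x s16> rather than
; truncated, so it can travel with the packed f16 gradients; GFX9 concatenates
; it together with the padded operands into <8 x s16>.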
define amdgpu_ps <4 x float> @sample_c_cd_cl_1d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %zcompare, half %dsdh, half %dsdv, half %s, half %clamp) {
; GFX9-LABEL: name: sample_c_cd_cl_1d
; GFX9: bb.1.main_body:
; GFX9-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
; GFX9-NEXT: {{ $}}
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX9-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX9-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX9-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX9-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX9-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX9-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX9-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX9-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX9-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX9-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX9-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX9-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
; GFX9-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX9-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX9-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[DEF]](s16)
; GFX9-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
; GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>)
; GFX9-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.cd.cl.1d), 15, [[CONCAT_VECTORS]](<8 x s16>), $noreg, $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX9-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX9-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX9-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX9-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX10-LABEL: name: sample_c_cd_cl_1d
; GFX10: bb.1.main_body:
; GFX10-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX10-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX10-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX10-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX10-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX10-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX10-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX10-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX10-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX10-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX10-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX10-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
; GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX10-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX10-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[DEF]](s16)
; GFX10-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
; GFX10-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.cd.cl.1d), 15, [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX10-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX10-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX10-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX11-LABEL: name: sample_c_cd_cl_1d
; GFX11: bb.1.main_body:
; GFX11-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
; GFX11-NEXT: {{ $}}
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX11-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX11-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX11-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX11-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX11-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX11-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX11-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX11-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX11-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX11-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX11-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX11-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX11-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX11-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX11-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX11-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX11-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
; GFX11-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX11-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX11-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX11-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[DEF]](s16)
; GFX11-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
; GFX11-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.cd.cl.1d), 15, [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX11-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX11-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX11-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX11-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX12-LABEL: name: sample_c_cd_cl_1d
; GFX12: bb.1.main_body:
; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
; GFX12-NEXT: {{ $}}
; GFX12-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX12-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX12-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX12-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX12-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX12-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX12-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX12-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX12-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX12-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX12-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX12-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX12-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX12-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX12-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX12-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX12-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX12-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX12-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX12-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX12-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX12-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX12-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
; GFX12-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX12-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX12-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX12-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[DEF]](s16)
; GFX12-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
; GFX12-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.cd.cl.1d), 15, [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX12-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX12-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX12-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX12-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX12-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
main_body:
%v = call <4 x float> @llvm.amdgcn.image.sample.c.cd.cl.1d.v4f32.f32.f16(i32 15, float %zcompare, half %dsdh, half %dsdv, half %s, half %clamp, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
ret <4 x float> %v
}
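
; With zcompare plus seven f16 operands GFX9 concatenates into <10 x s16>;
; GFX10 and GFX11 keep five separate <2 x s16> registers, while GFX12 merges
; the trailing (s, t) and (clamp, undef) pairs into one <4 x s16> operand.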
define amdgpu_ps <4 x float> @sample_c_cd_cl_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %zcompare, half %dsdh, half %dtdh, half %dsdv, half %dtdv, half %s, half %t, half %clamp) {
; GFX9-LABEL: name: sample_c_cd_cl_2d
; GFX9: bb.1.main_body:
; GFX9-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7
; GFX9-NEXT: {{ $}}
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX9-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX9-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX9-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX9-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX9-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX9-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX9-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX9-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX9-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX9-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX9-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX9-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
; GFX9-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr5
; GFX9-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY17]](s32)
; GFX9-NEXT: [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr6
; GFX9-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[COPY18]](s32)
; GFX9-NEXT: [[COPY19:%[0-9]+]]:_(s32) = COPY $vgpr7
; GFX9-NEXT: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[COPY19]](s32)
; GFX9-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX9-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
; GFX9-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC4]](s16), [[TRUNC5]](s16)
; GFX9-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX9-NEXT: [[BUILD_VECTOR5:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC6]](s16), [[DEF]](s16)
; GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<10 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), [[BUILD_VECTOR5]](<2 x s16>)
; GFX9-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.cd.cl.2d), 15, [[CONCAT_VECTORS]](<10 x s16>), $noreg, $noreg, $noreg, $noreg, $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX9-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX9-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX9-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX9-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX10-LABEL: name: sample_c_cd_cl_2d
; GFX10: bb.1.main_body:
; GFX10-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX10-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX10-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX10-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX10-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX10-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX10-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX10-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX10-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX10-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX10-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX10-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
; GFX10-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr5
; GFX10-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY17]](s32)
; GFX10-NEXT: [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr6
; GFX10-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[COPY18]](s32)
; GFX10-NEXT: [[COPY19:%[0-9]+]]:_(s32) = COPY $vgpr7
; GFX10-NEXT: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[COPY19]](s32)
; GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX10-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
; GFX10-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC4]](s16), [[TRUNC5]](s16)
; GFX10-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX10-NEXT: [[BUILD_VECTOR5:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC6]](s16), [[DEF]](s16)
; GFX10-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.cd.cl.2d), 15, [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), [[BUILD_VECTOR5]](<2 x s16>), $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX10-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX10-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX10-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX11-LABEL: name: sample_c_cd_cl_2d
; GFX11: bb.1.main_body:
; GFX11-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7
; GFX11-NEXT: {{ $}}
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX11-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX11-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX11-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX11-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX11-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX11-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX11-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX11-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX11-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX11-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX11-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX11-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX11-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX11-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX11-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX11-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX11-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
; GFX11-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr5
; GFX11-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY17]](s32)
; GFX11-NEXT: [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr6
; GFX11-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[COPY18]](s32)
; GFX11-NEXT: [[COPY19:%[0-9]+]]:_(s32) = COPY $vgpr7
; GFX11-NEXT: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[COPY19]](s32)
; GFX11-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX11-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX11-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
; GFX11-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC4]](s16), [[TRUNC5]](s16)
; GFX11-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX11-NEXT: [[BUILD_VECTOR5:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC6]](s16), [[DEF]](s16)
; GFX11-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.cd.cl.2d), 15, [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), [[BUILD_VECTOR5]](<2 x s16>), $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX11-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX11-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX11-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX11-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX12-LABEL: name: sample_c_cd_cl_2d
; GFX12: bb.1.main_body:
; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7
; GFX12-NEXT: {{ $}}
; GFX12-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX12-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX12-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX12-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX12-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX12-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX12-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX12-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX12-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX12-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX12-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX12-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX12-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX12-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX12-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX12-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX12-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX12-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX12-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX12-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX12-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX12-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX12-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
; GFX12-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr5
; GFX12-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY17]](s32)
; GFX12-NEXT: [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr6
; GFX12-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[COPY18]](s32)
; GFX12-NEXT: [[COPY19:%[0-9]+]]:_(s32) = COPY $vgpr7
; GFX12-NEXT: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[COPY19]](s32)
; GFX12-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX12-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX12-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
; GFX12-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC4]](s16), [[TRUNC5]](s16)
; GFX12-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX12-NEXT: [[BUILD_VECTOR5:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC6]](s16), [[DEF]](s16)
; GFX12-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR4]](<2 x s16>), [[BUILD_VECTOR5]](<2 x s16>)
; GFX12-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.cd.cl.2d), 15, [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX12-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX12-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX12-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX12-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX12-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
main_body:
  %v = call <4 x float> @llvm.amdgcn.image.sample.c.cd.cl.2d.v4f32.f16.f16(i32 15, float %zcompare, half %dsdh, half %dtdh, half %dsdv, half %dtdv, half %s, half %t, half %clamp, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
  ret <4 x float> %v
}

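; sample_l_1d: the two half operands (%s, %lod) pack into a single <2 x s16> vaddr register; the second address slot stays $noreg.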
define amdgpu_ps <4 x float> @sample_l_1d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, half %s, half %lod) {
; GFX9-LABEL: name: sample_l_1d
; GFX9: bb.1.main_body:
; GFX9-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1
; GFX9-NEXT: {{ $}}
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX9-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX9-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX9-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX9-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX9-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX9-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX9-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX9-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX9-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.l.1d), 15, [[BUILD_VECTOR2]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX9-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX9-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX9-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX9-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX10-LABEL: name: sample_l_1d
; GFX10: bb.1.main_body:
; GFX10-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX10-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX10-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX10-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX10-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX10-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX10-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX10-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.l.1d), 15, [[BUILD_VECTOR2]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX10-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX10-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX10-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX11-LABEL: name: sample_l_1d
; GFX11: bb.1.main_body:
; GFX11-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1
; GFX11-NEXT: {{ $}}
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX11-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX11-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX11-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX11-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX11-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX11-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX11-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX11-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX11-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX11-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX11-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX11-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX11-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX11-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.l.1d), 15, [[BUILD_VECTOR2]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX11-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX11-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX11-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX11-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX12-LABEL: name: sample_l_1d
; GFX12: bb.1.main_body:
; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1
; GFX12-NEXT: {{ $}}
; GFX12-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX12-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX12-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX12-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX12-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX12-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX12-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX12-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX12-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX12-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX12-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX12-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX12-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX12-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX12-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX12-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX12-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX12-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX12-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX12-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.l.1d), 15, [[BUILD_VECTOR2]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX12-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX12-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX12-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX12-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX12-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
main_body:
  %v = call <4 x float> @llvm.amdgcn.image.sample.l.1d.v4f32.f16(i32 15, half %s, half %lod, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
  ret <4 x float> %v
}

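; sample_l_2d: the three half coordinates pack as (%s, %t) and (%lod, undef); GFX9-GFX11 concatenate the two pairs into one <4 x s16> vaddr, while GFX12 passes them as separate <2 x s16> operands.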
define amdgpu_ps <4 x float> @sample_l_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, half %s, half %t, half %lod) {
; GFX9-LABEL: name: sample_l_2d
; GFX9: bb.1.main_body:
; GFX9-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
; GFX9-NEXT: {{ $}}
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX9-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX9-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX9-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX9-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX9-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX9-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX9-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX9-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX9-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX9-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX9-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX9-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF]](s16)
; GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>)
; GFX9-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.l.2d), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX9-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX9-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX9-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX9-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX10-LABEL: name: sample_l_2d
; GFX10: bb.1.main_body:
; GFX10-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX10-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX10-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX10-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX10-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX10-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX10-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX10-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX10-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX10-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX10-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF]](s16)
; GFX10-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>)
; GFX10-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.l.2d), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX10-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX10-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX10-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX11-LABEL: name: sample_l_2d
; GFX11: bb.1.main_body:
; GFX11-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
; GFX11-NEXT: {{ $}}
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX11-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX11-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX11-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX11-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX11-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX11-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX11-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX11-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX11-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX11-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX11-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX11-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX11-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX11-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX11-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX11-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX11-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF]](s16)
; GFX11-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>)
; GFX11-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.l.2d), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX11-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX11-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX11-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX11-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX12-LABEL: name: sample_l_2d
; GFX12: bb.1.main_body:
; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
; GFX12-NEXT: {{ $}}
; GFX12-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX12-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX12-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX12-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX12-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX12-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX12-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX12-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX12-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX12-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX12-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX12-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX12-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX12-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX12-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX12-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX12-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX12-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX12-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX12-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX12-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX12-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX12-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF]](s16)
; GFX12-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.l.2d), 15, [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX12-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX12-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX12-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX12-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX12-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
main_body:
  %v = call <4 x float> @llvm.amdgcn.image.sample.l.2d.v4f32.f16(i32 15, half %s, half %t, half %lod, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
  ret <4 x float> %v
}

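; sample_c_l_1d: the f32 %zcompare is bitcast to <2 x s16> and the (%s, %lod) halves form a second pair; GFX9-GFX11 concatenate both into a <4 x s16> vaddr, GFX12 keeps them as two operands.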
define amdgpu_ps <4 x float> @sample_c_l_1d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %zcompare, half %s, half %lod) {
; GFX9-LABEL: name: sample_c_l_1d
; GFX9: bb.1.main_body:
; GFX9-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
; GFX9-NEXT: {{ $}}
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX9-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX9-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX9-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX9-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX9-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX9-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX9-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX9-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX9-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>)
; GFX9-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.l.1d), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX9-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX9-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX9-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX9-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX10-LABEL: name: sample_c_l_1d
; GFX10: bb.1.main_body:
; GFX10-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX10-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX10-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX10-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX10-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX10-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX10-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX10-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX10-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>)
; GFX10-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.l.1d), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX10-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX10-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX10-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX11-LABEL: name: sample_c_l_1d
; GFX11: bb.1.main_body:
; GFX11-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
; GFX11-NEXT: {{ $}}
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX11-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX11-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX11-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX11-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX11-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX11-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX11-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX11-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX11-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX11-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX11-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX11-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX11-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX11-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX11-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX11-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>)
; GFX11-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.l.1d), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX11-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX11-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX11-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX11-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX12-LABEL: name: sample_c_l_1d
; GFX12: bb.1.main_body:
; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
; GFX12-NEXT: {{ $}}
; GFX12-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX12-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX12-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX12-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX12-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX12-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX12-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX12-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX12-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX12-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX12-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX12-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX12-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX12-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX12-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX12-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX12-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX12-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX12-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX12-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX12-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX12-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.l.1d), 15, [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX12-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX12-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX12-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX12-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX12-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
main_body:
  %v = call <4 x float> @llvm.amdgcn.image.sample.c.l.1d.v4f32.f16(i32 15, float %zcompare, half %s, half %lod, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
  ret <4 x float> %v
}

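; sample_c_l_2d: GFX9 concatenates the bitcast %zcompare, (%s, %t), and (%lod, undef) pairs into a single <6 x s16> vaddr; GFX10-GFX12 pass the three <2 x s16> registers separately.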
define amdgpu_ps <4 x float> @sample_c_l_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %zcompare, half %s, half %t, half %lod) {
; GFX9-LABEL: name: sample_c_l_2d
; GFX9: bb.1.main_body:
; GFX9-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
; GFX9-NEXT: {{ $}}
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX9-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX9-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX9-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX9-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX9-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX9-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX9-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX9-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX9-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX9-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX9-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX9-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX9-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF]](s16)
; GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<6 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>)
; GFX9-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.l.2d), 15, [[CONCAT_VECTORS]](<6 x s16>), $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX9-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX9-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX9-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX9-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX10-LABEL: name: sample_c_l_2d
; GFX10: bb.1.main_body:
; GFX10-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX10-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX10-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX10-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX10-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX10-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX10-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX10-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX10-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX10-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX10-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX10-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF]](s16)
; GFX10-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.l.2d), 15, [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX10-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX10-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX10-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX11-LABEL: name: sample_c_l_2d
; GFX11: bb.1.main_body:
; GFX11-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
; GFX11-NEXT: {{ $}}
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX11-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX11-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX11-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX11-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX11-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX11-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX11-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX11-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX11-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX11-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX11-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX11-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX11-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX11-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX11-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX11-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX11-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX11-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX11-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF]](s16)
; GFX11-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.l.2d), 15, [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX11-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX11-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX11-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX11-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX12-LABEL: name: sample_c_l_2d
; GFX12: bb.1.main_body:
; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
; GFX12-NEXT: {{ $}}
; GFX12-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX12-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX12-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX12-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX12-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX12-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX12-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX12-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX12-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX12-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX12-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX12-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX12-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX12-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX12-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX12-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX12-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX12-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX12-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX12-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX12-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX12-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX12-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX12-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX12-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF]](s16)
; GFX12-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.l.2d), 15, [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX12-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX12-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX12-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX12-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX12-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
main_body:
  %v = call <4 x float> @llvm.amdgcn.image.sample.c.l.2d.v4f32.f16(i32 15, float %zcompare, half %s, half %t, half %lod, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
  ret <4 x float> %v
}

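; sample_lz_1d: the lone half coordinate %s widens to a <2 x s16> vaddr with an undef high half.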
define amdgpu_ps <4 x float> @sample_lz_1d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, half %s) {
; GFX9-LABEL: name: sample_lz_1d
; GFX9: bb.1.main_body:
; GFX9-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0
; GFX9-NEXT: {{ $}}
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX9-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX9-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX9-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX9-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX9-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX9-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX9-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX9-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX9-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.lz.1d), 15, [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX9-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX9-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX9-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX9-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX10-LABEL: name: sample_lz_1d
; GFX10: bb.1.main_body:
; GFX10-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX10-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX10-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX10-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX10-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX10-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX10-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX10-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.lz.1d), 15, [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX10-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX10-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX10-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX11-LABEL: name: sample_lz_1d
; GFX11: bb.1.main_body:
; GFX11-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0
; GFX11-NEXT: {{ $}}
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX11-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX11-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX11-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX11-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX11-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX11-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX11-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX11-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX11-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX11-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX11-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX11-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX11-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX11-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.lz.1d), 15, [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX11-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX11-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX11-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX11-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX12-LABEL: name: sample_lz_1d
; GFX12: bb.1.main_body:
; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0
; GFX12-NEXT: {{ $}}
; GFX12-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX12-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX12-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX12-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX12-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX12-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX12-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX12-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX12-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX12-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX12-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX12-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX12-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX12-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX12-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX12-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX12-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX12-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX12-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.lz.1d), 15, [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX12-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX12-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX12-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX12-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX12-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
main_body:
  %v = call <4 x float> @llvm.amdgcn.image.sample.lz.1d.v4f32.f16(i32 15, half %s, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
  ret <4 x float> %v
}

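; sample_lz_2d: (%s, %t) pack into one <2 x s16> vaddr; the second address slot stays $noreg.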
define amdgpu_ps <4 x float> @sample_lz_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, half %s, half %t) {
; GFX9-LABEL: name: sample_lz_2d
; GFX9: bb.1.main_body:
; GFX9-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1
; GFX9-NEXT: {{ $}}
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX9-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX9-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX9-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX9-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX9-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX9-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX9-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX9-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX9-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.lz.2d), 15, [[BUILD_VECTOR2]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX9-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX9-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX9-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX9-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
6558 ; GFX10-LABEL: name: sample_lz_2d
6559 ; GFX10: bb.1.main_body:
6560 ; GFX10-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1
6561 ; GFX10-NEXT: {{ $}}
6562 ; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
6563 ; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
6564 ; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
6565 ; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
6566 ; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
6567 ; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
6568 ; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
6569 ; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
6570 ; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
6571 ; GFX10-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
6572 ; GFX10-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
6573 ; GFX10-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
6574 ; GFX10-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
6575 ; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
6576 ; GFX10-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
6577 ; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
6578 ; GFX10-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
6579 ; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
6580 ; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
6581 ; GFX10-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.lz.2d), 15, [[BUILD_VECTOR2]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
6582 ; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
6583 ; GFX10-NEXT: $vgpr0 = COPY [[UV]](s32)
6584 ; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
6585 ; GFX10-NEXT: $vgpr2 = COPY [[UV2]](s32)
6586 ; GFX10-NEXT: $vgpr3 = COPY [[UV3]](s32)
6587 ; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
6589 ; GFX11-LABEL: name: sample_lz_2d
6590 ; GFX11: bb.1.main_body:
6591 ; GFX11-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1
6592 ; GFX11-NEXT: {{ $}}
6593 ; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
6594 ; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
6595 ; GFX11-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
6596 ; GFX11-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
6597 ; GFX11-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
6598 ; GFX11-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
6599 ; GFX11-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
6600 ; GFX11-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
6601 ; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
6602 ; GFX11-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
6603 ; GFX11-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
6604 ; GFX11-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
6605 ; GFX11-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
6606 ; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
6607 ; GFX11-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
6608 ; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
6609 ; GFX11-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
6610 ; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
6611 ; GFX11-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
6612 ; GFX11-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.lz.2d), 15, [[BUILD_VECTOR2]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
6613 ; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
6614 ; GFX11-NEXT: $vgpr0 = COPY [[UV]](s32)
6615 ; GFX11-NEXT: $vgpr1 = COPY [[UV1]](s32)
6616 ; GFX11-NEXT: $vgpr2 = COPY [[UV2]](s32)
6617 ; GFX11-NEXT: $vgpr3 = COPY [[UV3]](s32)
6618 ; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
6620 ; GFX12-LABEL: name: sample_lz_2d
6621 ; GFX12: bb.1.main_body:
6622 ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1
6623 ; GFX12-NEXT: {{ $}}
6624 ; GFX12-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
6625 ; GFX12-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
6626 ; GFX12-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
6627 ; GFX12-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
6628 ; GFX12-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
6629 ; GFX12-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
6630 ; GFX12-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
6631 ; GFX12-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
6632 ; GFX12-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
6633 ; GFX12-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
6634 ; GFX12-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
6635 ; GFX12-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
6636 ; GFX12-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
6637 ; GFX12-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
6638 ; GFX12-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
6639 ; GFX12-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
6640 ; GFX12-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
6641 ; GFX12-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
6642 ; GFX12-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
6643 ; GFX12-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.lz.2d), 15, [[BUILD_VECTOR2]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
6644 ; GFX12-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
6645 ; GFX12-NEXT: $vgpr0 = COPY [[UV]](s32)
6646 ; GFX12-NEXT: $vgpr1 = COPY [[UV1]](s32)
6647 ; GFX12-NEXT: $vgpr2 = COPY [[UV2]](s32)
6648 ; GFX12-NEXT: $vgpr3 = COPY [[UV3]](s32)
6649 ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
6651 %v = call <4 x float> @llvm.amdgcn.image.sample.lz.2d.v4f32.f16(i32 15, half %s, half %t, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
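
; sample_c_lz_1d: %zcompare stays a full f32 dword (G_BITCAST to <2 x s16>)
; while the lone f16 %s is padded with a G_IMPLICIT_DEF half. The targets
; diverge on VADDR packing: GFX9-GFX11 concatenate both dwords into one
; <4 x s16> operand, whereas GFX12 passes the two <2 x s16> dwords as
; separate operands (our reading of the checks below; the MIR is authoritative).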
define amdgpu_ps <4 x float> @sample_c_lz_1d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %zcompare, half %s) {
; GFX9-LABEL: name: sample_c_lz_1d
; GFX9: bb.1.main_body:
; GFX9-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1
; GFX9-NEXT: {{ $}}
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX9-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX9-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX9-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX9-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX9-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX9-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX9-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX9-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX9-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>)
; GFX9-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.lz.1d), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX9-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX9-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX9-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX9-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX10-LABEL: name: sample_c_lz_1d
; GFX10: bb.1.main_body:
; GFX10-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX10-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX10-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX10-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX10-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX10-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX10-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX10-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX10-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>)
; GFX10-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.lz.1d), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX10-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX10-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX10-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX11-LABEL: name: sample_c_lz_1d
; GFX11: bb.1.main_body:
; GFX11-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1
; GFX11-NEXT: {{ $}}
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX11-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX11-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX11-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX11-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX11-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX11-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX11-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX11-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX11-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX11-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX11-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX11-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX11-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX11-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX11-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX11-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>)
; GFX11-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.lz.1d), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX11-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX11-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX11-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX11-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX12-LABEL: name: sample_c_lz_1d
; GFX12: bb.1.main_body:
; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1
; GFX12-NEXT: {{ $}}
; GFX12-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX12-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX12-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX12-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX12-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX12-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX12-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX12-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX12-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX12-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX12-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX12-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX12-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX12-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX12-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX12-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX12-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX12-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX12-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX12-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX12-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.lz.1d), 15, [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX12-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX12-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX12-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX12-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX12-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
main_body:
  %v = call <4 x float> @llvm.amdgcn.image.sample.c.lz.1d.v4f32.f16(i32 15, float %zcompare, half %s, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
  ret <4 x float> %v
}
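
; sample_c_lz_2d: same pattern with two packed coordinates; GFX9-GFX11 emit a
; <4 x s16> G_CONCAT_VECTORS, while GFX12 keeps the bitcast zcompare dword and
; the packed coordinate dword as separate VADDR operands followed by $noreg.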
define amdgpu_ps <4 x float> @sample_c_lz_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %zcompare, half %s, half %t) {
; GFX9-LABEL: name: sample_c_lz_2d
; GFX9: bb.1.main_body:
; GFX9-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
; GFX9-NEXT: {{ $}}
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX9-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX9-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX9-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX9-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX9-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX9-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX9-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX9-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX9-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>)
; GFX9-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.lz.2d), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX9-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX9-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX9-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX9-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX10-LABEL: name: sample_c_lz_2d
; GFX10: bb.1.main_body:
; GFX10-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX10-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX10-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX10-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX10-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX10-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX10-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX10-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX10-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>)
; GFX10-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.lz.2d), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX10-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX10-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX10-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX11-LABEL: name: sample_c_lz_2d
; GFX11: bb.1.main_body:
; GFX11-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
; GFX11-NEXT: {{ $}}
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX11-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX11-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX11-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX11-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX11-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX11-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX11-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX11-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX11-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX11-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX11-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX11-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX11-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX11-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX11-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX11-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>)
; GFX11-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.lz.2d), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX11-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX11-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX11-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX11-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX12-LABEL: name: sample_c_lz_2d
; GFX12: bb.1.main_body:
; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
; GFX12-NEXT: {{ $}}
; GFX12-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX12-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX12-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX12-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX12-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX12-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX12-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX12-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX12-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX12-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX12-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX12-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX12-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX12-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX12-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX12-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX12-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX12-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX12-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX12-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX12-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX12-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.lz.2d), 15, [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX12-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX12-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX12-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX12-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX12-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
main_body:
  %v = call <4 x float> @llvm.amdgcn.image.sample.c.lz.2d.v4f32.f16(i32 15, float %zcompare, half %s, half %t, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
  ret <4 x float> %v
}
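
; sample_c_d_o_2darray_V1: dmask is 4 (single channel), so the result is a
; plain s32. %offset and %zcompare occupy full dwords, the four f16 gradients
; and the s/t coordinates pack in pairs, and %slice is padded with an
; implicit-def half. GFX9/GFX10 fold everything into one <12 x s16> operand;
; GFX11 and GFX12 keep the leading dwords as separate operands and only
; concatenate the tail (<4 x s16> and <6 x s16> respectively), presumably to
; match each target's VADDR/NSA encoding limits (our reading, not verified).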
define amdgpu_ps float @sample_c_d_o_2darray_V1(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, i32 %offset, float %zcompare, half %dsdh, half %dtdh, half %dsdv, half %dtdv, half %s, half %t, half %slice) {
; GFX9-LABEL: name: sample_c_d_o_2darray_V1
; GFX9: bb.1.main_body:
; GFX9-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8
; GFX9-NEXT: {{ $}}
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX9-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX9-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX9-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX9-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX9-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX9-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX9-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX9-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX9-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX9-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX9-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
; GFX9-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr5
; GFX9-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY17]](s32)
; GFX9-NEXT: [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr6
; GFX9-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY18]](s32)
; GFX9-NEXT: [[COPY19:%[0-9]+]]:_(s32) = COPY $vgpr7
; GFX9-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[COPY19]](s32)
; GFX9-NEXT: [[COPY20:%[0-9]+]]:_(s32) = COPY $vgpr8
; GFX9-NEXT: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[COPY20]](s32)
; GFX9-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX9-NEXT: [[BITCAST1:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY13]](s32)
; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX9-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
; GFX9-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC4]](s16), [[TRUNC5]](s16)
; GFX9-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX9-NEXT: [[BUILD_VECTOR5:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC6]](s16), [[DEF]](s16)
; GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<12 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BITCAST1]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), [[BUILD_VECTOR5]](<2 x s16>)
; GFX9-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(s32) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.d.o.2darray), 4, [[CONCAT_VECTORS]](<12 x s16>), $noreg, $noreg, $noreg, $noreg, $noreg, $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (s32), addrspace 8)
; GFX9-NEXT: $vgpr0 = COPY [[AMDGPU_INTRIN_IMAGE_LOAD]](s32)
; GFX9-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
;
; GFX10-LABEL: name: sample_c_d_o_2darray_V1
; GFX10: bb.1.main_body:
; GFX10-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX10-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX10-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX10-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX10-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX10-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX10-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX10-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX10-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX10-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX10-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
; GFX10-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr5
; GFX10-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY17]](s32)
; GFX10-NEXT: [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr6
; GFX10-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY18]](s32)
; GFX10-NEXT: [[COPY19:%[0-9]+]]:_(s32) = COPY $vgpr7
; GFX10-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[COPY19]](s32)
; GFX10-NEXT: [[COPY20:%[0-9]+]]:_(s32) = COPY $vgpr8
; GFX10-NEXT: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[COPY20]](s32)
; GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX10-NEXT: [[BITCAST1:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY13]](s32)
; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX10-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
; GFX10-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC4]](s16), [[TRUNC5]](s16)
; GFX10-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX10-NEXT: [[BUILD_VECTOR5:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC6]](s16), [[DEF]](s16)
; GFX10-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<12 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BITCAST1]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), [[BUILD_VECTOR5]](<2 x s16>)
; GFX10-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(s32) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.d.o.2darray), 4, [[CONCAT_VECTORS]](<12 x s16>), $noreg, $noreg, $noreg, $noreg, $noreg, $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (s32), addrspace 8)
; GFX10-NEXT: $vgpr0 = COPY [[AMDGPU_INTRIN_IMAGE_LOAD]](s32)
; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
;
; GFX11-LABEL: name: sample_c_d_o_2darray_V1
; GFX11: bb.1.main_body:
; GFX11-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8
; GFX11-NEXT: {{ $}}
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX11-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX11-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX11-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX11-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX11-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX11-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX11-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX11-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX11-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX11-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX11-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX11-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX11-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX11-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX11-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX11-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
; GFX11-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr5
; GFX11-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY17]](s32)
; GFX11-NEXT: [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr6
; GFX11-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY18]](s32)
; GFX11-NEXT: [[COPY19:%[0-9]+]]:_(s32) = COPY $vgpr7
; GFX11-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[COPY19]](s32)
; GFX11-NEXT: [[COPY20:%[0-9]+]]:_(s32) = COPY $vgpr8
; GFX11-NEXT: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[COPY20]](s32)
; GFX11-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX11-NEXT: [[BITCAST1:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY13]](s32)
; GFX11-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX11-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
; GFX11-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC4]](s16), [[TRUNC5]](s16)
; GFX11-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX11-NEXT: [[BUILD_VECTOR5:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC6]](s16), [[DEF]](s16)
; GFX11-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR4]](<2 x s16>), [[BUILD_VECTOR5]](<2 x s16>)
; GFX11-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(s32) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.d.o.2darray), 4, [[BITCAST]](<2 x s16>), [[BITCAST1]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (s32), addrspace 8)
; GFX11-NEXT: $vgpr0 = COPY [[AMDGPU_INTRIN_IMAGE_LOAD]](s32)
; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
;
; GFX12-LABEL: name: sample_c_d_o_2darray_V1
; GFX12: bb.1.main_body:
; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8
; GFX12-NEXT: {{ $}}
; GFX12-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX12-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX12-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX12-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX12-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX12-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX12-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX12-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX12-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX12-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX12-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX12-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX12-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX12-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX12-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX12-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX12-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX12-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX12-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX12-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX12-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX12-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
; GFX12-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr5
; GFX12-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY17]](s32)
; GFX12-NEXT: [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr6
; GFX12-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY18]](s32)
; GFX12-NEXT: [[COPY19:%[0-9]+]]:_(s32) = COPY $vgpr7
; GFX12-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[COPY19]](s32)
; GFX12-NEXT: [[COPY20:%[0-9]+]]:_(s32) = COPY $vgpr8
; GFX12-NEXT: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[COPY20]](s32)
; GFX12-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX12-NEXT: [[BITCAST1:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY13]](s32)
; GFX12-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX12-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
; GFX12-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC4]](s16), [[TRUNC5]](s16)
; GFX12-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX12-NEXT: [[BUILD_VECTOR5:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC6]](s16), [[DEF]](s16)
; GFX12-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<6 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), [[BUILD_VECTOR5]](<2 x s16>)
; GFX12-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(s32) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.d.o.2darray), 4, [[BITCAST]](<2 x s16>), [[BITCAST1]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>), [[CONCAT_VECTORS]](<6 x s16>), $noreg, $noreg, $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (s32), addrspace 8)
; GFX12-NEXT: $vgpr0 = COPY [[AMDGPU_INTRIN_IMAGE_LOAD]](s32)
; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
main_body:
  %v = call float @llvm.amdgcn.image.sample.c.d.o.2darray.f32.f16.f16(i32 4, i32 %offset, float %zcompare, half %dsdh, half %dtdh, half %dsdv, half %dtdv, half %s, half %t, half %slice, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
  ret float %v
}
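
; sample_c_d_o_2darray_V2 repeats the test with dmask 6 (two channels), so the
; intrinsic legalizes to a <2 x s32> load that is unmerged into $vgpr0/$vgpr1;
; the VADDR packing per target is otherwise identical to the V1 case above.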
7120 define amdgpu_ps <2 x float> @sample_c_d_o_2darray_V2(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, i32 %offset, float %zcompare, half %dsdh, half %dtdh, half %dsdv, half %dtdv, half %s, half %t, half %slice) {
7121 ; GFX9-LABEL: name: sample_c_d_o_2darray_V2
7122 ; GFX9: bb.1.main_body:
7123 ; GFX9-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8
7125 ; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
7126 ; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
7127 ; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
7128 ; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
7129 ; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
7130 ; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
7131 ; GFX9-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
7132 ; GFX9-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
7133 ; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
7134 ; GFX9-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
7135 ; GFX9-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
7136 ; GFX9-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
7137 ; GFX9-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
7138 ; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
7139 ; GFX9-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
7140 ; GFX9-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
7141 ; GFX9-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
7142 ; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
7143 ; GFX9-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
7144 ; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
7145 ; GFX9-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
7146 ; GFX9-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
7147 ; GFX9-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr5
7148 ; GFX9-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY17]](s32)
7149 ; GFX9-NEXT: [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr6
7150 ; GFX9-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY18]](s32)
7151 ; GFX9-NEXT: [[COPY19:%[0-9]+]]:_(s32) = COPY $vgpr7
7152 ; GFX9-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[COPY19]](s32)
7153 ; GFX9-NEXT: [[COPY20:%[0-9]+]]:_(s32) = COPY $vgpr8
7154 ; GFX9-NEXT: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[COPY20]](s32)
7155 ; GFX9-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
7156 ; GFX9-NEXT: [[BITCAST1:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY13]](s32)
7157 ; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
7158 ; GFX9-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
7159 ; GFX9-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC4]](s16), [[TRUNC5]](s16)
7160 ; GFX9-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
7161 ; GFX9-NEXT: [[BUILD_VECTOR5:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC6]](s16), [[DEF]](s16)
7162 ; GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<12 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BITCAST1]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), [[BUILD_VECTOR5]](<2 x s16>)
7163 ; GFX9-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<2 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.d.o.2darray), 6, [[CONCAT_VECTORS]](<12 x s16>), $noreg, $noreg, $noreg, $noreg, $noreg, $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<2 x s32>), addrspace 8)
7164 ; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<2 x s32>)
7165 ; GFX9-NEXT: $vgpr0 = COPY [[UV]](s32)
7166 ; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
7167 ; GFX9-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1
7169 ; GFX10-LABEL: name: sample_c_d_o_2darray_V2
7170 ; GFX10: bb.1.main_body:
7171 ; GFX10-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8
7172 ; GFX10-NEXT: {{ $}}
7173 ; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
7174 ; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
7175 ; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
7176 ; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
7177 ; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
7178 ; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
7179 ; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
7180 ; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
7181 ; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
7182 ; GFX10-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
7183 ; GFX10-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
7184 ; GFX10-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
7185 ; GFX10-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
7186 ; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
7187 ; GFX10-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
7188 ; GFX10-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
7189 ; GFX10-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
7190 ; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
7191 ; GFX10-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
7192 ; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
7193 ; GFX10-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
7194 ; GFX10-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
7195 ; GFX10-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr5
7196 ; GFX10-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY17]](s32)
7197 ; GFX10-NEXT: [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr6
7198 ; GFX10-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY18]](s32)
7199 ; GFX10-NEXT: [[COPY19:%[0-9]+]]:_(s32) = COPY $vgpr7
7200 ; GFX10-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[COPY19]](s32)
7201 ; GFX10-NEXT: [[COPY20:%[0-9]+]]:_(s32) = COPY $vgpr8
7202 ; GFX10-NEXT: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[COPY20]](s32)
7203 ; GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
7204 ; GFX10-NEXT: [[BITCAST1:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY13]](s32)
7205 ; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
7206 ; GFX10-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
7207 ; GFX10-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC4]](s16), [[TRUNC5]](s16)
7208 ; GFX10-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
7209 ; GFX10-NEXT: [[BUILD_VECTOR5:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC6]](s16), [[DEF]](s16)
7210 ; GFX10-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<12 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BITCAST1]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), [[BUILD_VECTOR5]](<2 x s16>)
7211 ; GFX10-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<2 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.d.o.2darray), 6, [[CONCAT_VECTORS]](<12 x s16>), $noreg, $noreg, $noreg, $noreg, $noreg, $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<2 x s32>), addrspace 8)
7212 ; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<2 x s32>)
7213 ; GFX10-NEXT: $vgpr0 = COPY [[UV]](s32)
7214 ; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
7215 ; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1
; GFX11-LABEL: name: sample_c_d_o_2darray_V2
; GFX11: bb.1.main_body:
; GFX11-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8
; GFX11-NEXT: {{ $}}
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX11-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX11-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX11-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX11-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX11-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX11-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX11-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX11-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX11-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX11-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX11-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX11-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX11-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX11-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX11-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX11-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
; GFX11-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr5
; GFX11-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY17]](s32)
; GFX11-NEXT: [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr6
; GFX11-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY18]](s32)
; GFX11-NEXT: [[COPY19:%[0-9]+]]:_(s32) = COPY $vgpr7
; GFX11-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[COPY19]](s32)
; GFX11-NEXT: [[COPY20:%[0-9]+]]:_(s32) = COPY $vgpr8
; GFX11-NEXT: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[COPY20]](s32)
; GFX11-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX11-NEXT: [[BITCAST1:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY13]](s32)
; GFX11-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX11-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
; GFX11-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC4]](s16), [[TRUNC5]](s16)
; GFX11-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX11-NEXT: [[BUILD_VECTOR5:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC6]](s16), [[DEF]](s16)
; GFX11-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR4]](<2 x s16>), [[BUILD_VECTOR5]](<2 x s16>)
; GFX11-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<2 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.d.o.2darray), 6, [[BITCAST]](<2 x s16>), [[BITCAST1]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<2 x s32>), addrspace 8)
; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<2 x s32>)
; GFX11-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX11-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1
;
; GFX12-LABEL: name: sample_c_d_o_2darray_V2
; GFX12: bb.1.main_body:
; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8
; GFX12-NEXT: {{ $}}
; GFX12-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX12-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX12-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX12-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX12-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX12-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX12-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX12-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX12-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX12-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX12-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX12-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX12-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX12-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX12-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX12-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX12-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX12-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX12-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX12-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX12-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX12-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
; GFX12-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr5
; GFX12-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY17]](s32)
; GFX12-NEXT: [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr6
; GFX12-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY18]](s32)
; GFX12-NEXT: [[COPY19:%[0-9]+]]:_(s32) = COPY $vgpr7
; GFX12-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[COPY19]](s32)
; GFX12-NEXT: [[COPY20:%[0-9]+]]:_(s32) = COPY $vgpr8
; GFX12-NEXT: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[COPY20]](s32)
; GFX12-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX12-NEXT: [[BITCAST1:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY13]](s32)
; GFX12-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX12-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
; GFX12-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC4]](s16), [[TRUNC5]](s16)
; GFX12-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX12-NEXT: [[BUILD_VECTOR5:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC6]](s16), [[DEF]](s16)
; GFX12-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<6 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), [[BUILD_VECTOR5]](<2 x s16>)
; GFX12-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<2 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.d.o.2darray), 6, [[BITCAST]](<2 x s16>), [[BITCAST1]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>), [[CONCAT_VECTORS]](<6 x s16>), $noreg, $noreg, $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<2 x s32>), addrspace 8)
; GFX12-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<2 x s32>)
; GFX12-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX12-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1
main_body:
  %v = call <2 x float> @llvm.amdgcn.image.sample.c.d.o.2darray.v2f32.f32.f16(i32 6, i32 %offset, float %zcompare, half %dsdh, half %dtdh, half %dsdv, half %dtdv, half %s, half %t, half %slice, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
  ret <2 x float> %v
}
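
; Declarations of the image sample intrinsic overloads exercised by the tests above.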
declare <4 x float> @llvm.amdgcn.image.sample.1d.v4f32.f16(i32, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1
declare <8 x float> @llvm.amdgcn.image.sample.1d.v8f32.f16(i32, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.sample.2d.v4f32.f16(i32, half, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.sample.3d.v4f32.f16(i32, half, half, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.sample.cube.v4f32.f16(i32, half, half, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.sample.1darray.v4f32.f16(i32, half, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.sample.2darray.v4f32.f16(i32, half, half, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1

declare <4 x float> @llvm.amdgcn.image.sample.c.1d.v4f32.f16(i32, float, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.sample.c.2d.v4f32.f16(i32, float, half, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.sample.cl.1d.v4f32.f16(i32, half, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.sample.cl.2d.v4f32.f16(i32, half, half, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.sample.c.cl.1d.v4f32.f16(i32, float, half, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.sample.c.cl.2d.v4f32.f16(i32, float, half, half, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1

declare <4 x float> @llvm.amdgcn.image.sample.b.1d.v4f32.f16.f16(i32, half, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.sample.b.2d.v4f32.f16.f16(i32, half, half, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.sample.c.b.1d.v4f32.f16.f16(i32, half, float, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.sample.c.b.2d.v4f32.f16.f16(i32, half, float, half, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.sample.b.cl.1d.v4f32.f16.f16(i32, half, half, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.sample.b.cl.2d.v4f32.f16.f16(i32, half, half, half, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.sample.c.b.cl.1d.v4f32.f16.f16(i32, half, float, half, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.sample.c.b.cl.2d.v4f32.f16.f16(i32, half, float, half, half, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1

declare <4 x float> @llvm.amdgcn.image.sample.d.1d.v4f32.f16.f16(i32, half, half, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.sample.d.2d.v4f32.f16.f16(i32, half, half, half, half, half, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.sample.d.3d.v4f32.f16.f16(i32, half, half, half, half, half, half, half, half, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.sample.c.d.1d.v4f32.f32.f16(i32, float, half, half, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.sample.c.d.2d.v4f32.f32.f16(i32, float, half, half, half, half, half, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.sample.d.cl.1d.v4f32.f16.f16(i32, half, half, half, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.sample.d.cl.2d.v4f32.f16.f16(i32, half, half, half, half, half, half, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.sample.c.d.cl.1d.v4f32.f32.f16(i32, float, half, half, half, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.sample.c.d.cl.2d.v4f32.f32.f16(i32, float, half, half, half, half, half, half, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1

declare <4 x float> @llvm.amdgcn.image.sample.cd.1d.v4f32.f16.f16(i32, half, half, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.sample.cd.2d.v4f32.f16.f16(i32, half, half, half, half, half, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.sample.c.cd.1d.v4f32.f32.f16(i32, float, half, half, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.sample.c.cd.2d.v4f32.f32.f16(i32, float, half, half, half, half, half, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.sample.cd.cl.1d.v4f32.f16.f16(i32, half, half, half, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.sample.cd.cl.2d.v4f32.f16.f16(i32, half, half, half, half, half, half, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.sample.c.cd.cl.1d.v4f32.f32.f16(i32, float, half, half, half, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.sample.c.cd.cl.2d.v4f32.f32.f16(i32, float, half, half, half, half, half, half, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1

declare <4 x float> @llvm.amdgcn.image.sample.l.1d.v4f32.f16(i32, half, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.sample.l.2d.v4f32.f16(i32, half, half, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.sample.c.l.1d.v4f32.f16(i32, float, half, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.sample.c.l.2d.v4f32.f16(i32, float, half, half, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1

declare <4 x float> @llvm.amdgcn.image.sample.lz.1d.v4f32.f16(i32, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.sample.lz.2d.v4f32.f16(i32, half, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.sample.c.lz.1d.v4f32.f16(i32, float, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.sample.c.lz.2d.v4f32.f16(i32, float, half, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1

declare float @llvm.amdgcn.image.sample.c.d.o.2darray.f32.f16.f16(i32, i32, float, half, half, half, half, half, half, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1
declare <2 x float> @llvm.amdgcn.image.sample.c.d.o.2darray.v2f32.f32.f16(i32, i32, float, half, half, half, half, half, half, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1

attributes #0 = { nounwind }
attributes #1 = { nounwind readonly }
attributes #2 = { nounwind readnone }