; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx900 -stop-after=legalizer -o - %s | FileCheck -check-prefix=GFX9 %s
; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1010 -stop-after=legalizer -o - %s | FileCheck -check-prefix=GFX10 %s
; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1100 -stop-after=legalizer -o - %s | FileCheck -check-prefix=GFX11 %s
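
; These tests check legalization of the f16-coordinate variants of the image
; sample intrinsics: each half coordinate is truncated from its s32 argument
; and packed into a <2 x s16> (or <4 x s16>) address operand of
; G_AMDGPU_INTRIN_IMAGE_LOAD, with a G_IMPLICIT_DEF half padding any odd
; coordinate count.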
define amdgpu_ps <4 x float> @sample_1d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, half %s) {
  ; GFX9-LABEL: name: sample_1d
  ; GFX9: bb.1.main_body:
  ; GFX9-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0
  ; GFX9-NEXT: {{  $}}
  ; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
  ; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
  ; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
  ; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
  ; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
  ; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
  ; GFX9-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
  ; GFX9-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
  ; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
  ; GFX9-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
  ; GFX9-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
  ; GFX9-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
  ; GFX9-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
  ; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
  ; GFX9-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
  ; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
  ; GFX9-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
  ; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
  ; GFX9-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.1d), 15, [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
  ; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
  ; GFX9-NEXT: $vgpr0 = COPY [[UV]](s32)
  ; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
  ; GFX9-NEXT: $vgpr2 = COPY [[UV2]](s32)
  ; GFX9-NEXT: $vgpr3 = COPY [[UV3]](s32)
  ; GFX9-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
  ;
  ; GFX10-LABEL: name: sample_1d
  ; GFX10: bb.1.main_body:
  ; GFX10-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0
  ; GFX10-NEXT: {{  $}}
  ; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
  ; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
  ; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
  ; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
  ; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
  ; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
  ; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
  ; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
  ; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
  ; GFX10-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
  ; GFX10-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
  ; GFX10-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
  ; GFX10-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
  ; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
  ; GFX10-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
  ; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
  ; GFX10-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
  ; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
  ; GFX10-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.1d), 15, [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
  ; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
  ; GFX10-NEXT: $vgpr0 = COPY [[UV]](s32)
  ; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
  ; GFX10-NEXT: $vgpr2 = COPY [[UV2]](s32)
  ; GFX10-NEXT: $vgpr3 = COPY [[UV3]](s32)
  ; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
  ;
  ; GFX11-LABEL: name: sample_1d
  ; GFX11: bb.1.main_body:
  ; GFX11-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0
  ; GFX11-NEXT: {{  $}}
  ; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
  ; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
  ; GFX11-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
  ; GFX11-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
  ; GFX11-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
  ; GFX11-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
  ; GFX11-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
  ; GFX11-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
  ; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
  ; GFX11-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
  ; GFX11-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
  ; GFX11-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
  ; GFX11-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
  ; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
  ; GFX11-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
  ; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
  ; GFX11-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
  ; GFX11-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
  ; GFX11-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.1d), 15, [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
  ; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
  ; GFX11-NEXT: $vgpr0 = COPY [[UV]](s32)
  ; GFX11-NEXT: $vgpr1 = COPY [[UV1]](s32)
  ; GFX11-NEXT: $vgpr2 = COPY [[UV2]](s32)
  ; GFX11-NEXT: $vgpr3 = COPY [[UV3]](s32)
  ; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
main_body:
  %v = call <4 x float> @llvm.amdgcn.image.sample.1d.v4f32.f16(i32 15, half %s, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
  ret <4 x float> %v
}
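
; With two half coordinates the pair packs exactly into one <2 x s16>, so no
; G_IMPLICIT_DEF padding is required.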
define amdgpu_ps <4 x float> @sample_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, half %s, half %t) {
  ; GFX9-LABEL: name: sample_2d
  ; GFX9: bb.1.main_body:
  ; GFX9-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1
  ; GFX9-NEXT: {{  $}}
  ; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
  ; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
  ; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
  ; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
  ; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
  ; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
  ; GFX9-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
  ; GFX9-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
  ; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
  ; GFX9-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
  ; GFX9-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
  ; GFX9-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
  ; GFX9-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
  ; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
  ; GFX9-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
  ; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
  ; GFX9-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
  ; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
  ; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
  ; GFX9-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.2d), 15, [[BUILD_VECTOR2]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
  ; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
  ; GFX9-NEXT: $vgpr0 = COPY [[UV]](s32)
  ; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
  ; GFX9-NEXT: $vgpr2 = COPY [[UV2]](s32)
  ; GFX9-NEXT: $vgpr3 = COPY [[UV3]](s32)
  ; GFX9-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
  ;
  ; GFX10-LABEL: name: sample_2d
  ; GFX10: bb.1.main_body:
  ; GFX10-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1
  ; GFX10-NEXT: {{  $}}
  ; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
  ; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
  ; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
  ; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
  ; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
  ; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
  ; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
  ; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
  ; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
  ; GFX10-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
  ; GFX10-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
  ; GFX10-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
  ; GFX10-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
  ; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
  ; GFX10-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
  ; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
  ; GFX10-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
  ; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
  ; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
  ; GFX10-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.2d), 15, [[BUILD_VECTOR2]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
  ; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
  ; GFX10-NEXT: $vgpr0 = COPY [[UV]](s32)
  ; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
  ; GFX10-NEXT: $vgpr2 = COPY [[UV2]](s32)
  ; GFX10-NEXT: $vgpr3 = COPY [[UV3]](s32)
  ; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
  ;
  ; GFX11-LABEL: name: sample_2d
  ; GFX11: bb.1.main_body:
  ; GFX11-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1
  ; GFX11-NEXT: {{  $}}
  ; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
  ; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
  ; GFX11-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
  ; GFX11-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
  ; GFX11-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
  ; GFX11-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
  ; GFX11-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
  ; GFX11-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
  ; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
  ; GFX11-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
  ; GFX11-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
  ; GFX11-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
  ; GFX11-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
  ; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
  ; GFX11-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
  ; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
  ; GFX11-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
  ; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
  ; GFX11-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
  ; GFX11-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.2d), 15, [[BUILD_VECTOR2]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
  ; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
  ; GFX11-NEXT: $vgpr0 = COPY [[UV]](s32)
  ; GFX11-NEXT: $vgpr1 = COPY [[UV1]](s32)
  ; GFX11-NEXT: $vgpr2 = COPY [[UV2]](s32)
  ; GFX11-NEXT: $vgpr3 = COPY [[UV3]](s32)
  ; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
main_body:
  %v = call <4 x float> @llvm.amdgcn.image.sample.2d.v4f32.f16(i32 15, half %s, half %t, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
  ret <4 x float> %v
}
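
; Three half coordinates need a <4 x s16>: two <2 x s16> halves are built and
; concatenated, with the fourth lane filled by G_IMPLICIT_DEF.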
define amdgpu_ps <4 x float> @sample_3d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, half %s, half %t, half %r) {
  ; GFX9-LABEL: name: sample_3d
  ; GFX9: bb.1.main_body:
  ; GFX9-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
  ; GFX9-NEXT: {{  $}}
  ; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
  ; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
  ; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
  ; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
  ; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
  ; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
  ; GFX9-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
  ; GFX9-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
  ; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
  ; GFX9-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
  ; GFX9-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
  ; GFX9-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
  ; GFX9-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
  ; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
  ; GFX9-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
  ; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
  ; GFX9-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
  ; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
  ; GFX9-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
  ; GFX9-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
  ; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
  ; GFX9-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
  ; GFX9-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF]](s16)
  ; GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>)
  ; GFX9-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.3d), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
  ; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
  ; GFX9-NEXT: $vgpr0 = COPY [[UV]](s32)
  ; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
  ; GFX9-NEXT: $vgpr2 = COPY [[UV2]](s32)
  ; GFX9-NEXT: $vgpr3 = COPY [[UV3]](s32)
  ; GFX9-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
  ;
  ; GFX10-LABEL: name: sample_3d
  ; GFX10: bb.1.main_body:
  ; GFX10-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
  ; GFX10-NEXT: {{  $}}
  ; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
  ; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
  ; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
  ; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
  ; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
  ; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
  ; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
  ; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
  ; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
  ; GFX10-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
  ; GFX10-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
  ; GFX10-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
  ; GFX10-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
  ; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
  ; GFX10-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
  ; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
  ; GFX10-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
  ; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
  ; GFX10-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
  ; GFX10-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
  ; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
  ; GFX10-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
  ; GFX10-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF]](s16)
  ; GFX10-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>)
  ; GFX10-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.3d), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
  ; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
  ; GFX10-NEXT: $vgpr0 = COPY [[UV]](s32)
  ; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
  ; GFX10-NEXT: $vgpr2 = COPY [[UV2]](s32)
  ; GFX10-NEXT: $vgpr3 = COPY [[UV3]](s32)
  ; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
  ;
  ; GFX11-LABEL: name: sample_3d
  ; GFX11: bb.1.main_body:
  ; GFX11-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
  ; GFX11-NEXT: {{  $}}
  ; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
  ; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
  ; GFX11-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
  ; GFX11-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
  ; GFX11-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
  ; GFX11-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
  ; GFX11-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
  ; GFX11-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
  ; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
  ; GFX11-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
  ; GFX11-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
  ; GFX11-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
  ; GFX11-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
  ; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
  ; GFX11-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
  ; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
  ; GFX11-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
  ; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
  ; GFX11-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
  ; GFX11-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
  ; GFX11-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
  ; GFX11-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
  ; GFX11-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF]](s16)
  ; GFX11-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>)
  ; GFX11-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.3d), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
  ; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
  ; GFX11-NEXT: $vgpr0 = COPY [[UV]](s32)
  ; GFX11-NEXT: $vgpr1 = COPY [[UV1]](s32)
  ; GFX11-NEXT: $vgpr2 = COPY [[UV2]](s32)
  ; GFX11-NEXT: $vgpr3 = COPY [[UV3]](s32)
  ; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
main_body:
  %v = call <4 x float> @llvm.amdgcn.image.sample.3d.v4f32.f16(i32 15, half %s, half %t, half %r, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
  ret <4 x float> %v
}
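
; Cube sampling packs like the 3D case: s, t and the face index occupy three
; lanes of a padded <4 x s16>.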
define amdgpu_ps <4 x float> @sample_cube(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, half %s, half %t, half %face) {
  ; GFX9-LABEL: name: sample_cube
  ; GFX9: bb.1.main_body:
  ; GFX9-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
  ; GFX9-NEXT: {{  $}}
  ; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
  ; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
  ; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
  ; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
  ; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
  ; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
  ; GFX9-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
  ; GFX9-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
  ; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
  ; GFX9-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
  ; GFX9-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
  ; GFX9-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
  ; GFX9-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
  ; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
  ; GFX9-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
  ; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
  ; GFX9-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
  ; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
  ; GFX9-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
  ; GFX9-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
  ; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
  ; GFX9-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
  ; GFX9-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF]](s16)
  ; GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>)
  ; GFX9-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.cube), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
  ; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
  ; GFX9-NEXT: $vgpr0 = COPY [[UV]](s32)
  ; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
  ; GFX9-NEXT: $vgpr2 = COPY [[UV2]](s32)
  ; GFX9-NEXT: $vgpr3 = COPY [[UV3]](s32)
  ; GFX9-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
  ;
  ; GFX10-LABEL: name: sample_cube
  ; GFX10: bb.1.main_body:
  ; GFX10-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
  ; GFX10-NEXT: {{  $}}
  ; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
  ; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
  ; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
  ; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
  ; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
  ; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
  ; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
  ; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
  ; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
  ; GFX10-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
  ; GFX10-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
  ; GFX10-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
  ; GFX10-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
  ; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
  ; GFX10-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
  ; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
  ; GFX10-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
  ; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
  ; GFX10-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
  ; GFX10-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
  ; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
  ; GFX10-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
  ; GFX10-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF]](s16)
  ; GFX10-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>)
  ; GFX10-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.cube), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
  ; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
  ; GFX10-NEXT: $vgpr0 = COPY [[UV]](s32)
  ; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
  ; GFX10-NEXT: $vgpr2 = COPY [[UV2]](s32)
  ; GFX10-NEXT: $vgpr3 = COPY [[UV3]](s32)
  ; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
  ;
  ; GFX11-LABEL: name: sample_cube
  ; GFX11: bb.1.main_body:
  ; GFX11-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
  ; GFX11-NEXT: {{  $}}
  ; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
  ; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
  ; GFX11-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
  ; GFX11-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
  ; GFX11-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
  ; GFX11-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
  ; GFX11-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
  ; GFX11-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
  ; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
  ; GFX11-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
  ; GFX11-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
  ; GFX11-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
  ; GFX11-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
  ; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
  ; GFX11-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
  ; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
  ; GFX11-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
  ; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
  ; GFX11-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
  ; GFX11-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
  ; GFX11-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
  ; GFX11-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
  ; GFX11-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF]](s16)
  ; GFX11-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>)
  ; GFX11-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.cube), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
  ; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
  ; GFX11-NEXT: $vgpr0 = COPY [[UV]](s32)
  ; GFX11-NEXT: $vgpr1 = COPY [[UV1]](s32)
  ; GFX11-NEXT: $vgpr2 = COPY [[UV2]](s32)
  ; GFX11-NEXT: $vgpr3 = COPY [[UV3]](s32)
  ; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
main_body:
  %v = call <4 x float> @llvm.amdgcn.image.sample.cube.v4f32.f16(i32 15, half %s, half %t, half %face, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
  ret <4 x float> %v
}
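
; The array slice counts as an address component, so 1D-array packs like 2D:
; one full <2 x s16> with no padding.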
define amdgpu_ps <4 x float> @sample_1darray(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, half %s, half %slice) {
  ; GFX9-LABEL: name: sample_1darray
  ; GFX9: bb.1.main_body:
  ; GFX9-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1
  ; GFX9-NEXT: {{  $}}
  ; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
  ; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
  ; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
  ; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
  ; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
  ; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
  ; GFX9-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
  ; GFX9-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
  ; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
  ; GFX9-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
  ; GFX9-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
  ; GFX9-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
  ; GFX9-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
  ; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
  ; GFX9-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
  ; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
  ; GFX9-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
  ; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
  ; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
  ; GFX9-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.1darray), 15, [[BUILD_VECTOR2]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
  ; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
  ; GFX9-NEXT: $vgpr0 = COPY [[UV]](s32)
  ; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
  ; GFX9-NEXT: $vgpr2 = COPY [[UV2]](s32)
  ; GFX9-NEXT: $vgpr3 = COPY [[UV3]](s32)
  ; GFX9-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
  ;
  ; GFX10-LABEL: name: sample_1darray
  ; GFX10: bb.1.main_body:
  ; GFX10-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1
  ; GFX10-NEXT: {{  $}}
  ; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
  ; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
  ; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
  ; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
  ; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
  ; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
  ; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
  ; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
  ; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
  ; GFX10-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
  ; GFX10-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
  ; GFX10-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
  ; GFX10-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
  ; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
  ; GFX10-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
  ; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
  ; GFX10-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
  ; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
  ; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
  ; GFX10-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.1darray), 15, [[BUILD_VECTOR2]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
  ; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
  ; GFX10-NEXT: $vgpr0 = COPY [[UV]](s32)
  ; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
  ; GFX10-NEXT: $vgpr2 = COPY [[UV2]](s32)
  ; GFX10-NEXT: $vgpr3 = COPY [[UV3]](s32)
  ; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
  ;
  ; GFX11-LABEL: name: sample_1darray
  ; GFX11: bb.1.main_body:
  ; GFX11-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1
  ; GFX11-NEXT: {{  $}}
  ; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
  ; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
  ; GFX11-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
  ; GFX11-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
  ; GFX11-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
  ; GFX11-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
  ; GFX11-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
  ; GFX11-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
  ; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
  ; GFX11-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
  ; GFX11-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
  ; GFX11-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
  ; GFX11-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
  ; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
  ; GFX11-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
  ; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
  ; GFX11-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
  ; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
  ; GFX11-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
  ; GFX11-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.1darray), 15, [[BUILD_VECTOR2]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
  ; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
  ; GFX11-NEXT: $vgpr0 = COPY [[UV]](s32)
  ; GFX11-NEXT: $vgpr1 = COPY [[UV1]](s32)
  ; GFX11-NEXT: $vgpr2 = COPY [[UV2]](s32)
  ; GFX11-NEXT: $vgpr3 = COPY [[UV3]](s32)
  ; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
main_body:
  %v = call <4 x float> @llvm.amdgcn.image.sample.1darray.v4f32.f16(i32 15, half %s, half %slice, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
  ret <4 x float> %v
}
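
; Likewise, 2D-array packs like 3D: three halves in a <4 x s16> with one
; G_IMPLICIT_DEF lane.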
define amdgpu_ps <4 x float> @sample_2darray(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, half %s, half %t, half %slice) {
  ; GFX9-LABEL: name: sample_2darray
  ; GFX9: bb.1.main_body:
  ; GFX9-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
  ; GFX9-NEXT: {{  $}}
  ; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
  ; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
  ; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
  ; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
  ; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
  ; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
  ; GFX9-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
  ; GFX9-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
  ; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
  ; GFX9-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
  ; GFX9-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
  ; GFX9-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
  ; GFX9-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
  ; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
  ; GFX9-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
  ; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
  ; GFX9-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
  ; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
  ; GFX9-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
  ; GFX9-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
  ; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
  ; GFX9-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
  ; GFX9-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF]](s16)
  ; GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>)
  ; GFX9-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.2darray), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
  ; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
  ; GFX9-NEXT: $vgpr0 = COPY [[UV]](s32)
  ; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
  ; GFX9-NEXT: $vgpr2 = COPY [[UV2]](s32)
  ; GFX9-NEXT: $vgpr3 = COPY [[UV3]](s32)
  ; GFX9-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
  ;
  ; GFX10-LABEL: name: sample_2darray
  ; GFX10: bb.1.main_body:
  ; GFX10-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
  ; GFX10-NEXT: {{  $}}
  ; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
  ; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
  ; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
  ; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
  ; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
  ; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
  ; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
  ; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
  ; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
  ; GFX10-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
  ; GFX10-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
  ; GFX10-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
  ; GFX10-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
  ; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
  ; GFX10-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
  ; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
  ; GFX10-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
  ; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
  ; GFX10-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
  ; GFX10-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
  ; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
  ; GFX10-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
  ; GFX10-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF]](s16)
  ; GFX10-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>)
  ; GFX10-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.2darray), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
  ; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
  ; GFX10-NEXT: $vgpr0 = COPY [[UV]](s32)
  ; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
  ; GFX10-NEXT: $vgpr2 = COPY [[UV2]](s32)
  ; GFX10-NEXT: $vgpr3 = COPY [[UV3]](s32)
  ; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
  ;
  ; GFX11-LABEL: name: sample_2darray
  ; GFX11: bb.1.main_body:
  ; GFX11-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
  ; GFX11-NEXT: {{  $}}
  ; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
  ; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
  ; GFX11-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
  ; GFX11-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
  ; GFX11-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
  ; GFX11-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
  ; GFX11-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
  ; GFX11-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
  ; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
  ; GFX11-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
  ; GFX11-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
  ; GFX11-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
  ; GFX11-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
  ; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
  ; GFX11-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
  ; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
  ; GFX11-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
  ; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
  ; GFX11-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
  ; GFX11-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
  ; GFX11-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
  ; GFX11-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
  ; GFX11-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF]](s16)
  ; GFX11-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>)
  ; GFX11-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.2darray), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
  ; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
  ; GFX11-NEXT: $vgpr0 = COPY [[UV]](s32)
  ; GFX11-NEXT: $vgpr1 = COPY [[UV1]](s32)
  ; GFX11-NEXT: $vgpr2 = COPY [[UV2]](s32)
  ; GFX11-NEXT: $vgpr3 = COPY [[UV3]](s32)
  ; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
main_body:
  %v = call <4 x float> @llvm.amdgcn.image.sample.2darray.v4f32.f16(i32 15, half %s, half %t, half %slice, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
  ret <4 x float> %v
}
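
; For depth-compare samples the f32 zcompare value is bitcast to <2 x s16> and
; concatenated ahead of the padded half coordinate, giving a <4 x s16> operand.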
define amdgpu_ps <4 x float> @sample_c_1d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %zcompare, half %s) {
  ; GFX9-LABEL: name: sample_c_1d
  ; GFX9: bb.1.main_body:
  ; GFX9-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1
  ; GFX9-NEXT: {{  $}}
  ; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
  ; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
  ; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
  ; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
  ; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
  ; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
  ; GFX9-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
  ; GFX9-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
  ; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
  ; GFX9-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
  ; GFX9-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
  ; GFX9-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
  ; GFX9-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
  ; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
  ; GFX9-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
  ; GFX9-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
  ; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
  ; GFX9-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
  ; GFX9-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
  ; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
  ; GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>)
  ; GFX9-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.1d), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
  ; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
  ; GFX9-NEXT: $vgpr0 = COPY [[UV]](s32)
  ; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
  ; GFX9-NEXT: $vgpr2 = COPY [[UV2]](s32)
  ; GFX9-NEXT: $vgpr3 = COPY [[UV3]](s32)
  ; GFX9-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
  ;
  ; GFX10-LABEL: name: sample_c_1d
  ; GFX10: bb.1.main_body:
  ; GFX10-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1
  ; GFX10-NEXT: {{  $}}
  ; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
  ; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
  ; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
  ; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
  ; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
  ; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
  ; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
  ; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
  ; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
  ; GFX10-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
  ; GFX10-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
  ; GFX10-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
  ; GFX10-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
  ; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
  ; GFX10-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
  ; GFX10-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
  ; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
  ; GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
  ; GFX10-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
  ; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
  ; GFX10-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>)
  ; GFX10-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.1d), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
  ; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
  ; GFX10-NEXT: $vgpr0 = COPY [[UV]](s32)
  ; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
  ; GFX10-NEXT: $vgpr2 = COPY [[UV2]](s32)
  ; GFX10-NEXT: $vgpr3 = COPY [[UV3]](s32)
  ; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
  ;
  ; GFX11-LABEL: name: sample_c_1d
  ; GFX11: bb.1.main_body:
  ; GFX11-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1
  ; GFX11-NEXT: {{  $}}
  ; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
  ; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
  ; GFX11-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
  ; GFX11-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
  ; GFX11-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
  ; GFX11-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
  ; GFX11-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
  ; GFX11-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
  ; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
  ; GFX11-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
  ; GFX11-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
  ; GFX11-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
  ; GFX11-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
  ; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
  ; GFX11-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
  ; GFX11-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
  ; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
  ; GFX11-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
  ; GFX11-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
  ; GFX11-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
  ; GFX11-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>)
  ; GFX11-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.1d), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
  ; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
  ; GFX11-NEXT: $vgpr0 = COPY [[UV]](s32)
  ; GFX11-NEXT: $vgpr1 = COPY [[UV1]](s32)
  ; GFX11-NEXT: $vgpr2 = COPY [[UV2]](s32)
  ; GFX11-NEXT: $vgpr3 = COPY [[UV3]](s32)
  ; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
main_body:
  %v = call <4 x float> @llvm.amdgcn.image.sample.c.1d.v4f32.f16(i32 15, float %zcompare, half %s, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
  ret <4 x float> %v
}
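
; With a depth compare plus two half coordinates, the bitcast f32 and the
; <2 x s16> coordinate pair concatenate into a fully used <4 x s16>.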
define amdgpu_ps <4 x float> @sample_c_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %zcompare, half %s, half %t) {
; GFX9-LABEL: name: sample_c_2d
; GFX9: bb.1.main_body:
; GFX9-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
; GFX9-NEXT: {{ $}}
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX9-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX9-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX9-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX9-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX9-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX9-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX9-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX9-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX9-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>)
; GFX9-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.2d), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX9-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX9-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX9-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX9-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX10-LABEL: name: sample_c_2d
; GFX10: bb.1.main_body:
; GFX10-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX10-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX10-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX10-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX10-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX10-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX10-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX10-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX10-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>)
; GFX10-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.2d), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX10-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX10-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX10-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX11-LABEL: name: sample_c_2d
; GFX11: bb.1.main_body:
; GFX11-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
; GFX11-NEXT: {{ $}}
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX11-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX11-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX11-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX11-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX11-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX11-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX11-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX11-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX11-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX11-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX11-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX11-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX11-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX11-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX11-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX11-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>)
; GFX11-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.2d), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX11-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX11-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX11-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX11-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
main_body:
  %v = call <4 x float> @llvm.amdgcn.image.sample.c.2d.v4f32.f16(i32 15, float %zcompare, half %s, half %t, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
  ret <4 x float> %v
}
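
; 1D sample with an LOD clamp: both coordinates are half, so %s and %clamp are
; truncated and packed together into a single <2 x s16> address register.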
define amdgpu_ps <4 x float> @sample_cl_1d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, half %s, half %clamp) {
; GFX9-LABEL: name: sample_cl_1d
; GFX9: bb.1.main_body:
; GFX9-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1
; GFX9-NEXT: {{ $}}
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX9-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX9-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX9-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX9-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX9-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX9-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX9-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX9-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX9-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.cl.1d), 15, [[BUILD_VECTOR2]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX9-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX9-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX9-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX9-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX10-LABEL: name: sample_cl_1d
; GFX10: bb.1.main_body:
; GFX10-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX10-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX10-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX10-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX10-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX10-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX10-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX10-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.cl.1d), 15, [[BUILD_VECTOR2]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX10-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX10-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX10-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX11-LABEL: name: sample_cl_1d
; GFX11: bb.1.main_body:
; GFX11-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1
; GFX11-NEXT: {{ $}}
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX11-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX11-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX11-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX11-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX11-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX11-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX11-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX11-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX11-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX11-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX11-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX11-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX11-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX11-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.cl.1d), 15, [[BUILD_VECTOR2]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX11-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX11-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX11-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX11-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
main_body:
  %v = call <4 x float> @llvm.amdgcn.image.sample.cl.1d.v4f32.f16(i32 15, half %s, half %clamp, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
  ret <4 x float> %v
}
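
; 2D sample with an LOD clamp: %s/%t fill one <2 x s16>, and %clamp is padded
; with a G_IMPLICIT_DEF half to fill the second, yielding a <4 x s16> address.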
define amdgpu_ps <4 x float> @sample_cl_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, half %s, half %t, half %clamp) {
; GFX9-LABEL: name: sample_cl_2d
; GFX9: bb.1.main_body:
; GFX9-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
; GFX9-NEXT: {{ $}}
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX9-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX9-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX9-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX9-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX9-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX9-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX9-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX9-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX9-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX9-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX9-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX9-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF]](s16)
; GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>)
; GFX9-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.cl.2d), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX9-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX9-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX9-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX9-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX10-LABEL: name: sample_cl_2d
; GFX10: bb.1.main_body:
; GFX10-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX10-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX10-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX10-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX10-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX10-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX10-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX10-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX10-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX10-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX10-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF]](s16)
; GFX10-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>)
; GFX10-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.cl.2d), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX10-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX10-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX10-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX11-LABEL: name: sample_cl_2d
; GFX11: bb.1.main_body:
; GFX11-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
; GFX11-NEXT: {{ $}}
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX11-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX11-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX11-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX11-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX11-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX11-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX11-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX11-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX11-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX11-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX11-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX11-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX11-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX11-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX11-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX11-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX11-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF]](s16)
; GFX11-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>)
; GFX11-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.cl.2d), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX11-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX11-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX11-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX11-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
main_body:
  %v = call <4 x float> @llvm.amdgcn.image.sample.cl.2d.v4f32.f16(i32 15, half %s, half %t, half %clamp, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
  ret <4 x float> %v
}
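
; Compare + clamp in 1D: the f32 %zcompare is bitcast to <2 x s16>, and the
; two halves (%s, %clamp) share the second <2 x s16> of the address vector.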
define amdgpu_ps <4 x float> @sample_c_cl_1d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %zcompare, half %s, half %clamp) {
; GFX9-LABEL: name: sample_c_cl_1d
; GFX9: bb.1.main_body:
; GFX9-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
; GFX9-NEXT: {{ $}}
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX9-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX9-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX9-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX9-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX9-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX9-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX9-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX9-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX9-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>)
; GFX9-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.cl.1d), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX9-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX9-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX9-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX9-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX10-LABEL: name: sample_c_cl_1d
; GFX10: bb.1.main_body:
; GFX10-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX10-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX10-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX10-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX10-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX10-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX10-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX10-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX10-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>)
; GFX10-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.cl.1d), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX10-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX10-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX10-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX11-LABEL: name: sample_c_cl_1d
; GFX11: bb.1.main_body:
; GFX11-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
; GFX11-NEXT: {{ $}}
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX11-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX11-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX11-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX11-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX11-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX11-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX11-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX11-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX11-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX11-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX11-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX11-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX11-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX11-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX11-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX11-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>)
; GFX11-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.cl.1d), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX11-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX11-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX11-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX11-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
main_body:
  %v = call <4 x float> @llvm.amdgcn.image.sample.c.cl.1d.v4f32.f16(i32 15, float %zcompare, half %s, half %clamp, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
  ret <4 x float> %v
}
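
; Compare + clamp in 2D produces three <2 x s16> pieces (bitcast %zcompare,
; packed %s/%t, padded %clamp). GFX9 concatenates them into one <6 x s16>
; vector operand, while GFX10 and GFX11 pass the pieces as separate address
; operands to the image intrinsic instead.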
define amdgpu_ps <4 x float> @sample_c_cl_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %zcompare, half %s, half %t, half %clamp) {
; GFX9-LABEL: name: sample_c_cl_2d
; GFX9: bb.1.main_body:
; GFX9-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
; GFX9-NEXT: {{ $}}
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX9-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX9-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX9-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX9-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX9-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX9-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX9-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX9-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX9-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX9-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX9-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX9-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX9-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF]](s16)
; GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<6 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>)
; GFX9-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.cl.2d), 15, [[CONCAT_VECTORS]](<6 x s16>), $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX9-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX9-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX9-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX9-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX10-LABEL: name: sample_c_cl_2d
; GFX10: bb.1.main_body:
; GFX10-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX10-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX10-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX10-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX10-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX10-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX10-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX10-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX10-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX10-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX10-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX10-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF]](s16)
; GFX10-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.cl.2d), 15, [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX10-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX10-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX10-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX11-LABEL: name: sample_c_cl_2d
; GFX11: bb.1.main_body:
; GFX11-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
; GFX11-NEXT: {{ $}}
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX11-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX11-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX11-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX11-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX11-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX11-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX11-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX11-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX11-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX11-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX11-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX11-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX11-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX11-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX11-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX11-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX11-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX11-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX11-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF]](s16)
; GFX11-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.cl.2d), 15, [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX11-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX11-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX11-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX11-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
main_body:
  %v = call <4 x float> @llvm.amdgcn.image.sample.c.cl.2d.v4f32.f16(i32 15, float %zcompare, half %s, half %t, half %clamp, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
  ret <4 x float> %v
}
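
; Bias sample in 1D with an f16 bias: %bias and %s each get their own
; G_IMPLICIT_DEF-padded <2 x s16>, concatenated into a <4 x s16> address.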
define amdgpu_ps <4 x float> @sample_b_1d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, half %bias, half %s) {
; GFX9-LABEL: name: sample_b_1d
; GFX9: bb.1.main_body:
; GFX9-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1
; GFX9-NEXT: {{ $}}
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX9-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX9-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX9-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX9-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX9-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX9-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX9-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX9-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX9-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX9-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[DEF]](s16)
; GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>)
; GFX9-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.b.1d), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX9-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX9-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX9-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX9-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX10-LABEL: name: sample_b_1d
; GFX10: bb.1.main_body:
; GFX10-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX10-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX10-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX10-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX10-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX10-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX10-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX10-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX10-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[DEF]](s16)
; GFX10-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>)
; GFX10-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.b.1d), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX10-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX10-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX10-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX11-LABEL: name: sample_b_1d
; GFX11: bb.1.main_body:
; GFX11-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1
; GFX11-NEXT: {{ $}}
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX11-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX11-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX11-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX11-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX11-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX11-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX11-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX11-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX11-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX11-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX11-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX11-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX11-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX11-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX11-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[DEF]](s16)
; GFX11-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>)
; GFX11-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.b.1d), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX11-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX11-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX11-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX11-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
main_body:
  %v = call <4 x float> @llvm.amdgcn.image.sample.b.1d.v4f32.f16.f16(i32 15, half %bias, half %s, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
  ret <4 x float> %v
}
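
; Bias sample in 2D: the f16 %bias is still padded into its own <2 x s16>,
; while %s and %t pack together in the second half of the <4 x s16> address.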
define amdgpu_ps <4 x float> @sample_b_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, half %bias, half %s, half %t) {
; GFX9-LABEL: name: sample_b_2d
; GFX9: bb.1.main_body:
; GFX9-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
; GFX9-NEXT: {{ $}}
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX9-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX9-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX9-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX9-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX9-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX9-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX9-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX9-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX9-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX9-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX9-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX9-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[TRUNC2]](s16)
; GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>)
; GFX9-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.b.2d), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX9-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX9-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX9-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX9-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX10-LABEL: name: sample_b_2d
; GFX10: bb.1.main_body:
; GFX10-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX10-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX10-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX10-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX10-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX10-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX10-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX10-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX10-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX10-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX10-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[TRUNC2]](s16)
; GFX10-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>)
; GFX10-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.b.2d), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX10-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX10-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX10-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX11-LABEL: name: sample_b_2d
; GFX11: bb.1.main_body:
; GFX11-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
; GFX11-NEXT: {{ $}}
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX11-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX11-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX11-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX11-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX11-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX11-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX11-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX11-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX11-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX11-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX11-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX11-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX11-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX11-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX11-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX11-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX11-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[TRUNC2]](s16)
; GFX11-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>)
; GFX11-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.b.2d), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX11-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX11-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX11-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX11-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
main_body:
  %v = call <4 x float> @llvm.amdgcn.image.sample.b.2d.v4f32.f16.f16(i32 15, half %bias, half %s, half %t, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
  ret <4 x float> %v
}
define amdgpu_ps <4 x float> @sample_c_b_1d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, half %bias, float %zcompare, half %s) {
; GFX9-LABEL: name: sample_c_b_1d
; GFX9: bb.1.main_body:
; GFX9-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
; GFX9-NEXT: {{ $}}
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX9-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX9-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX9-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX9-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX9-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX9-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX9-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX9-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX9-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX9-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX9-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY13]](s32)
; GFX9-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[DEF]](s16)
; GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<6 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR2]](<2 x s16>), [[BITCAST]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>)
; GFX9-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.b.1d), 15, [[CONCAT_VECTORS]](<6 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX9-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX9-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX9-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX9-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX10-LABEL: name: sample_c_b_1d
; GFX10: bb.1.main_body:
; GFX10-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX10-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX10-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX10-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX10-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX10-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX10-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX10-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX10-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY13]](s32)
; GFX10-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[DEF]](s16)
; GFX10-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.b.1d), 15, [[BUILD_VECTOR2]](<2 x s16>), [[BITCAST]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX10-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX10-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX10-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX11-LABEL: name: sample_c_b_1d
; GFX11: bb.1.main_body:
; GFX11-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
; GFX11-NEXT: {{ $}}
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX11-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX11-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX11-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX11-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX11-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX11-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX11-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX11-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX11-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX11-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX11-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX11-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX11-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX11-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX11-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX11-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY13]](s32)
; GFX11-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[DEF]](s16)
; GFX11-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.b.1d), 15, [[BUILD_VECTOR2]](<2 x s16>), [[BITCAST]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX11-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX11-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX11-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX11-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
main_body:
  %v = call <4 x float> @llvm.amdgcn.image.sample.c.b.1d.v4f32.f16.f16(i32 15, half %bias, float %zcompare, half %s, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
  ret <4 x float> %v
}

define amdgpu_ps <4 x float> @sample_c_b_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, half %bias, float %zcompare, half %s, half %t) {
; GFX9-LABEL: name: sample_c_b_2d
; GFX9: bb.1.main_body:
; GFX9-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
; GFX9-NEXT: {{ $}}
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX9-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX9-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX9-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX9-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX9-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX9-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX9-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX9-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX9-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX9-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX9-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX9-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX9-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY13]](s32)
; GFX9-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[TRUNC2]](s16)
; GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<6 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR2]](<2 x s16>), [[BITCAST]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>)
; GFX9-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.b.2d), 15, [[CONCAT_VECTORS]](<6 x s16>), $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX9-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX9-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX9-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX9-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX10-LABEL: name: sample_c_b_2d
; GFX10: bb.1.main_body:
; GFX10-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX10-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX10-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX10-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX10-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX10-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX10-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX10-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX10-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX10-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX10-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY13]](s32)
; GFX10-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[TRUNC2]](s16)
; GFX10-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.b.2d), 15, [[BUILD_VECTOR2]](<2 x s16>), [[BITCAST]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX10-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX10-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX10-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX11-LABEL: name: sample_c_b_2d
; GFX11: bb.1.main_body:
; GFX11-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
; GFX11-NEXT: {{ $}}
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX11-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX11-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX11-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX11-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX11-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX11-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX11-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX11-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX11-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX11-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX11-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX11-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX11-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX11-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX11-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX11-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX11-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX11-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY13]](s32)
; GFX11-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[TRUNC2]](s16)
; GFX11-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.b.2d), 15, [[BUILD_VECTOR2]](<2 x s16>), [[BITCAST]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX11-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX11-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX11-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX11-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
main_body:
  %v = call <4 x float> @llvm.amdgcn.image.sample.c.b.2d.v4f32.f16.f16(i32 15, half %bias, float %zcompare, half %s, half %t, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
  ret <4 x float> %v
}

define amdgpu_ps <4 x float> @sample_b_cl_1d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, half %bias, half %s, half %clamp) {
; GFX9-LABEL: name: sample_b_cl_1d
; GFX9: bb.1.main_body:
; GFX9-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
; GFX9-NEXT: {{ $}}
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX9-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX9-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX9-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX9-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX9-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX9-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX9-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX9-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX9-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX9-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX9-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX9-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[TRUNC2]](s16)
; GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>)
; GFX9-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.b.cl.1d), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX9-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX9-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX9-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX9-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX10-LABEL: name: sample_b_cl_1d
; GFX10: bb.1.main_body:
; GFX10-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX10-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX10-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX10-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX10-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX10-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX10-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX10-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX10-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX10-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX10-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[TRUNC2]](s16)
; GFX10-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>)
; GFX10-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.b.cl.1d), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX10-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX10-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX10-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX11-LABEL: name: sample_b_cl_1d
; GFX11: bb.1.main_body:
; GFX11-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
; GFX11-NEXT: {{ $}}
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX11-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX11-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX11-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX11-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX11-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX11-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX11-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX11-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX11-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX11-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX11-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX11-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX11-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX11-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX11-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX11-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX11-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[TRUNC2]](s16)
; GFX11-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>)
; GFX11-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.b.cl.1d), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX11-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX11-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX11-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX11-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
main_body:
  %v = call <4 x float> @llvm.amdgcn.image.sample.b.cl.1d.v4f32.f16.f16(i32 15, half %bias, half %s, half %clamp, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
  ret <4 x float> %v
}

define amdgpu_ps <4 x float> @sample_b_cl_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, half %bias, half %s, half %t, half %clamp) {
; GFX9-LABEL: name: sample_b_cl_2d
; GFX9: bb.1.main_body:
; GFX9-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
; GFX9-NEXT: {{ $}}
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX9-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX9-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX9-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX9-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX9-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX9-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX9-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX9-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX9-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX9-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX9-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX9-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX9-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX9-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[TRUNC2]](s16)
; GFX9-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC3]](s16), [[DEF]](s16)
; GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<6 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>)
; GFX9-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.b.cl.2d), 15, [[CONCAT_VECTORS]](<6 x s16>), $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX9-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX9-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX9-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX9-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX10-LABEL: name: sample_b_cl_2d
; GFX10: bb.1.main_body:
; GFX10-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX10-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX10-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX10-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX10-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX10-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX10-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX10-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX10-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX10-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX10-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX10-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX10-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[TRUNC2]](s16)
; GFX10-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC3]](s16), [[DEF]](s16)
; GFX10-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.b.cl.2d), 15, [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX10-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX10-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX10-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX11-LABEL: name: sample_b_cl_2d
; GFX11: bb.1.main_body:
; GFX11-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
; GFX11-NEXT: {{ $}}
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX11-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX11-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX11-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX11-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX11-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX11-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX11-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX11-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX11-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX11-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX11-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX11-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX11-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX11-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX11-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX11-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX11-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX11-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX11-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[TRUNC2]](s16)
; GFX11-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC3]](s16), [[DEF]](s16)
; GFX11-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.b.cl.2d), 15, [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX11-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX11-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX11-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX11-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
main_body:
  %v = call <4 x float> @llvm.amdgcn.image.sample.b.cl.2d.v4f32.f16.f16(i32 15, half %bias, half %s, half %t, half %clamp, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
  ret <4 x float> %v
}

define amdgpu_ps <4 x float> @sample_c_b_cl_1d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, half %bias, float %zcompare, half %s, half %clamp) {
; GFX9-LABEL: name: sample_c_b_cl_1d
; GFX9: bb.1.main_body:
; GFX9-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
; GFX9-NEXT: {{ $}}
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX9-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX9-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX9-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX9-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX9-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX9-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX9-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX9-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX9-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX9-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX9-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX9-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX9-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY13]](s32)
; GFX9-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[TRUNC2]](s16)
; GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<6 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR2]](<2 x s16>), [[BITCAST]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>)
; GFX9-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.b.cl.1d), 15, [[CONCAT_VECTORS]](<6 x s16>), $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX9-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX9-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX9-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX9-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX10-LABEL: name: sample_c_b_cl_1d
; GFX10: bb.1.main_body:
; GFX10-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX10-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX10-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX10-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX10-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX10-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX10-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX10-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX10-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX10-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX10-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY13]](s32)
; GFX10-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[TRUNC2]](s16)
; GFX10-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.b.cl.1d), 15, [[BUILD_VECTOR2]](<2 x s16>), [[BITCAST]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX10-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX10-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX10-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX11-LABEL: name: sample_c_b_cl_1d
; GFX11: bb.1.main_body:
; GFX11-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
; GFX11-NEXT: {{ $}}
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX11-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX11-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX11-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX11-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX11-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX11-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX11-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX11-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX11-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX11-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX11-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX11-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX11-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX11-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX11-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX11-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX11-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX11-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY13]](s32)
; GFX11-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[TRUNC2]](s16)
; GFX11-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.b.cl.1d), 15, [[BUILD_VECTOR2]](<2 x s16>), [[BITCAST]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX11-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX11-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX11-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX11-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
main_body:
  %v = call <4 x float> @llvm.amdgcn.image.sample.c.b.cl.1d.v4f32.f16.f16(i32 15, half %bias, float %zcompare, half %s, half %clamp, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
  ret <4 x float> %v
}

define amdgpu_ps <4 x float> @sample_c_b_cl_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, half %bias, float %zcompare, half %s, half %t, half %clamp) {
; GFX9-LABEL: name: sample_c_b_cl_2d
; GFX9: bb.1.main_body:
; GFX9-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
; GFX9-NEXT: {{ $}}
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX9-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX9-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX9-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX9-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX9-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX9-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX9-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX9-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX9-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX9-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX9-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX9-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX9-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
; GFX9-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX9-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY13]](s32)
; GFX9-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[TRUNC2]](s16)
; GFX9-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC3]](s16), [[DEF]](s16)
; GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR2]](<2 x s16>), [[BITCAST]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>)
; GFX9-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.b.cl.2d), 15, [[CONCAT_VECTORS]](<8 x s16>), $noreg, $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX9-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX9-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX9-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX9-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX10-LABEL: name: sample_c_b_cl_2d
; GFX10: bb.1.main_body:
; GFX10-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX10-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX10-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX10-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX10-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX10-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX10-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX10-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX10-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX10-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX10-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX10-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
; GFX10-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY13]](s32)
; GFX10-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[TRUNC2]](s16)
; GFX10-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC3]](s16), [[DEF]](s16)
; GFX10-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.b.cl.2d), 15, [[BUILD_VECTOR2]](<2 x s16>), [[BITCAST]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX10-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX10-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX10-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX11-LABEL: name: sample_c_b_cl_2d
; GFX11: bb.1.main_body:
; GFX11-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
; GFX11-NEXT: {{ $}}
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX11-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX11-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX11-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX11-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX11-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX11-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX11-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX11-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX11-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX11-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX11-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX11-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX11-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX11-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX11-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX11-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX11-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
; GFX11-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX11-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX11-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY13]](s32)
; GFX11-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[TRUNC2]](s16)
; GFX11-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC3]](s16), [[DEF]](s16)
; GFX11-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.b.cl.2d), 15, [[BUILD_VECTOR2]](<2 x s16>), [[BITCAST]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX11-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX11-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX11-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX11-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
main_body:
  %v = call <4 x float> @llvm.amdgcn.image.sample.c.b.cl.2d.v4f32.f16.f16(i32 15, half %bias, float %zcompare, half %s, half %t, half %clamp, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
  ret <4 x float> %v
}

define amdgpu_ps <4 x float> @sample_d_1d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, half %dsdh, half %dsdv, half %s) {
; GFX9-LABEL: name: sample_d_1d
; GFX9: bb.1.main_body:
; GFX9-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
; GFX9-NEXT: {{ $}}
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX9-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX9-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX9-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX9-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX9-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX9-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX9-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX9-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX9-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX9-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX9-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX9-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[DEF]](s16)
; GFX9-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF]](s16)
; GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<6 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>)
; GFX9-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.d.1d), 15, [[CONCAT_VECTORS]](<6 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX9-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX9-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX9-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX9-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX10-LABEL: name: sample_d_1d
; GFX10: bb.1.main_body:
; GFX10-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX10-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX10-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX10-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX10-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX10-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX10-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX10-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX10-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX10-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX10-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[DEF]](s16)
; GFX10-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF]](s16)
; GFX10-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.d.1d), 15, [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX10-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX10-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX10-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX11-LABEL: name: sample_d_1d
; GFX11: bb.1.main_body:
; GFX11-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
; GFX11-NEXT: {{ $}}
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX11-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX11-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX11-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX11-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX11-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX11-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX11-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX11-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX11-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX11-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX11-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX11-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX11-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX11-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX11-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX11-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX11-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[DEF]](s16)
; GFX11-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF]](s16)
; GFX11-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.d.1d), 15, [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX11-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX11-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX11-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX11-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
main_body:
  %v = call <4 x float> @llvm.amdgcn.image.sample.d.1d.v4f32.f16.f16(i32 15, half %dsdh, half %dsdv, half %s, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
  ret <4 x float> %v
}

define amdgpu_ps <4 x float> @sample_d_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, half %dsdh, half %dtdh, half %dsdv, half %dtdv, half %s, half %t) {
; GFX9-LABEL: name: sample_d_2d
; GFX9: bb.1.main_body:
; GFX9-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
; GFX9-NEXT: {{ $}}
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX9-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX9-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX9-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX9-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX9-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX9-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX9-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX9-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX9-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX9-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX9-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX9-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX9-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX9-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
; GFX9-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr5
; GFX9-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[COPY17]](s32)
; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX9-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
; GFX9-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC4]](s16), [[TRUNC5]](s16)
; GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<6 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>)
; GFX9-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.d.2d), 15, [[CONCAT_VECTORS]](<6 x s16>), $noreg, $noreg, $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX9-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX9-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX9-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX9-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX10-LABEL: name: sample_d_2d
; GFX10: bb.1.main_body:
; GFX10-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX10-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX10-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX10-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX10-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX10-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX10-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX10-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX10-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX10-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX10-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX10-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX10-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
; GFX10-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr5
; GFX10-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[COPY17]](s32)
; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX10-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
; GFX10-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC4]](s16), [[TRUNC5]](s16)
; GFX10-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.d.2d), 15, [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX10-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX10-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX10-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX11-LABEL: name: sample_d_2d
; GFX11: bb.1.main_body:
; GFX11-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
; GFX11-NEXT: {{ $}}
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX11-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX11-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX11-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX11-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX11-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX11-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX11-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX11-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX11-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX11-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX11-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX11-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX11-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX11-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX11-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX11-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX11-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX11-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
; GFX11-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr5
; GFX11-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[COPY17]](s32)
; GFX11-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX11-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
; GFX11-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC4]](s16), [[TRUNC5]](s16)
; GFX11-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.d.2d), 15, [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX11-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX11-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX11-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX11-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
main_body:
  %v = call <4 x float> @llvm.amdgcn.image.sample.d.2d.v4f32.f16.f16(i32 15, half %dsdh, half %dtdh, half %dsdv, half %dtdv, half %s, half %t, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
  ret <4 x float> %v
}

define amdgpu_ps <4 x float> @sample_d_3d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, half %dsdh, half %dtdh, half %drdh, half %dsdv, half %dtdv, half %drdv, half %s, half %t, half %r) {
; GFX9-LABEL: name: sample_d_3d
; GFX9: bb.1.main_body:
; GFX9-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8
; GFX9-NEXT: {{ $}}
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX9-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX9-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX9-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX9-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX9-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX9-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX9-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX9-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX9-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX9-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX9-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX9-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX9-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX9-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
; GFX9-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr5
; GFX9-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[COPY17]](s32)
; GFX9-NEXT: [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr6
; GFX9-NEXT: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[COPY18]](s32)
; GFX9-NEXT: [[COPY19:%[0-9]+]]:_(s32) = COPY $vgpr7
; GFX9-NEXT: [[TRUNC7:%[0-9]+]]:_(s16) = G_TRUNC [[COPY19]](s32)
; GFX9-NEXT: [[COPY20:%[0-9]+]]:_(s32) = COPY $vgpr8
; GFX9-NEXT: [[TRUNC8:%[0-9]+]]:_(s16) = G_TRUNC [[COPY20]](s32)
; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX9-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX9-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF]](s16)
; GFX9-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC3]](s16), [[TRUNC4]](s16)
; GFX9-NEXT: [[BUILD_VECTOR5:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC5]](s16), [[DEF]](s16)
; GFX9-NEXT: [[BUILD_VECTOR6:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC6]](s16), [[TRUNC7]](s16)
; GFX9-NEXT: [[BUILD_VECTOR7:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC8]](s16), [[DEF]](s16)
; GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<12 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), [[BUILD_VECTOR5]](<2 x s16>), [[BUILD_VECTOR6]](<2 x s16>), [[BUILD_VECTOR7]](<2 x s16>)
; GFX9-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.d.3d), 15, [[CONCAT_VECTORS]](<12 x s16>), $noreg, $noreg, $noreg, $noreg, $noreg, $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX9-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX9-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX9-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX9-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX10-LABEL: name: sample_d_3d
; GFX10: bb.1.main_body:
; GFX10-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX10-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX10-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX10-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX10-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX10-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX10-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX10-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX10-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX10-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX10-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX10-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX10-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
; GFX10-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr5
; GFX10-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[COPY17]](s32)
; GFX10-NEXT: [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr6
; GFX10-NEXT: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[COPY18]](s32)
; GFX10-NEXT: [[COPY19:%[0-9]+]]:_(s32) = COPY $vgpr7
; GFX10-NEXT: [[TRUNC7:%[0-9]+]]:_(s16) = G_TRUNC [[COPY19]](s32)
; GFX10-NEXT: [[COPY20:%[0-9]+]]:_(s32) = COPY $vgpr8
; GFX10-NEXT: [[TRUNC8:%[0-9]+]]:_(s16) = G_TRUNC [[COPY20]](s32)
; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX10-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX10-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF]](s16)
; GFX10-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC3]](s16), [[TRUNC4]](s16)
; GFX10-NEXT: [[BUILD_VECTOR5:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC5]](s16), [[DEF]](s16)
; GFX10-NEXT: [[BUILD_VECTOR6:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC6]](s16), [[TRUNC7]](s16)
; GFX10-NEXT: [[BUILD_VECTOR7:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC8]](s16), [[DEF]](s16)
; GFX10-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<12 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), [[BUILD_VECTOR5]](<2 x s16>), [[BUILD_VECTOR6]](<2 x s16>), [[BUILD_VECTOR7]](<2 x s16>)
; GFX10-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.d.3d), 15, [[CONCAT_VECTORS]](<12 x s16>), $noreg, $noreg, $noreg, $noreg, $noreg, $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX10-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX10-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX10-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX11-LABEL: name: sample_d_3d
; GFX11: bb.1.main_body:
; GFX11-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8
; GFX11-NEXT: {{ $}}
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX11-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX11-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX11-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX11-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX11-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX11-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX11-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX11-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX11-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX11-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX11-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX11-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX11-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX11-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX11-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX11-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX11-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX11-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
; GFX11-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr5
; GFX11-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[COPY17]](s32)
; GFX11-NEXT: [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr6
; GFX11-NEXT: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[COPY18]](s32)
; GFX11-NEXT: [[COPY19:%[0-9]+]]:_(s32) = COPY $vgpr7
; GFX11-NEXT: [[TRUNC7:%[0-9]+]]:_(s16) = G_TRUNC [[COPY19]](s32)
; GFX11-NEXT: [[COPY20:%[0-9]+]]:_(s32) = COPY $vgpr8
; GFX11-NEXT: [[TRUNC8:%[0-9]+]]:_(s16) = G_TRUNC [[COPY20]](s32)
; GFX11-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX11-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX11-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF]](s16)
; GFX11-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC3]](s16), [[TRUNC4]](s16)
; GFX11-NEXT: [[BUILD_VECTOR5:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC5]](s16), [[DEF]](s16)
; GFX11-NEXT: [[BUILD_VECTOR6:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC6]](s16), [[TRUNC7]](s16)
; GFX11-NEXT: [[BUILD_VECTOR7:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC8]](s16), [[DEF]](s16)
; GFX11-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR6]](<2 x s16>), [[BUILD_VECTOR7]](<2 x s16>)
; GFX11-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.d.3d), 15, [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), [[BUILD_VECTOR5]](<2 x s16>), [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX11-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX11-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX11-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX11-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
main_body:
  %v = call <4 x float> @llvm.amdgcn.image.sample.d.3d.v4f32.f16.f16(i32 15, half %dsdh, half %dtdh, half %drdh, half %dsdv, half %dtdv, half %drdv, half %s, half %t, half %r, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
  ret <4 x float> %v
}

define amdgpu_ps <4 x float> @sample_c_d_1d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %zcompare, half %dsdh, half %dsdv, half %s) {
; GFX9-LABEL: name: sample_c_d_1d
; GFX9: bb.1.main_body:
; GFX9-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
; GFX9-NEXT: {{ $}}
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX9-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX9-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX9-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX9-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX9-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX9-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX9-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX9-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX9-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX9-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX9-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX9-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX9-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[DEF]](s16)
; GFX9-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF]](s16)
; GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>)
; GFX9-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.d.1d), 15, [[CONCAT_VECTORS]](<8 x s16>), $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX9-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX9-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX9-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX9-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX10-LABEL: name: sample_c_d_1d
; GFX10: bb.1.main_body:
; GFX10-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX10-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX10-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX10-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX10-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX10-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX10-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX10-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX10-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX10-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX10-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX10-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[DEF]](s16)
; GFX10-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF]](s16)
; GFX10-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.d.1d), 15, [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX10-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX10-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX10-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX11-LABEL: name: sample_c_d_1d
; GFX11: bb.1.main_body:
; GFX11-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
; GFX11-NEXT: {{ $}}
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX11-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX11-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX11-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX11-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX11-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX11-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX11-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX11-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX11-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX11-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX11-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX11-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX11-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX11-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX11-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX11-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX11-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX11-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX11-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[DEF]](s16)
; GFX11-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF]](s16)
; GFX11-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.d.1d), 15, [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX11-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX11-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX11-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX11-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
main_body:
  %v = call <4 x float> @llvm.amdgcn.image.sample.c.d.1d.v4f32.f32.f16(i32 15, float %zcompare, half %dsdh, half %dsdv, half %s, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
  ret <4 x float> %v
}

define amdgpu_ps <4 x float> @sample_c_d_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %zcompare, half %dsdh, half %dtdh, half %dsdv, half %dtdv, half %s, half %t) {
; GFX9-LABEL: name: sample_c_d_2d
; GFX9: bb.1.main_body:
; GFX9-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6
; GFX9-NEXT: {{ $}}
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX9-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX9-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX9-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX9-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX9-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX9-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX9-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX9-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX9-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX9-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX9-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX9-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
; GFX9-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr5
; GFX9-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY17]](s32)
; GFX9-NEXT: [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr6
; GFX9-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[COPY18]](s32)
; GFX9-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX9-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
; GFX9-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC4]](s16), [[TRUNC5]](s16)
; GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>)
; GFX9-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.d.2d), 15, [[CONCAT_VECTORS]](<8 x s16>), $noreg, $noreg, $noreg, $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX9-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX9-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX9-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX9-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX10-LABEL: name: sample_c_d_2d
; GFX10: bb.1.main_body:
; GFX10-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX10-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX10-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX10-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX10-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX10-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX10-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX10-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX10-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX10-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX10-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX10-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
; GFX10-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr5
; GFX10-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY17]](s32)
; GFX10-NEXT: [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr6
; GFX10-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[COPY18]](s32)
; GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX10-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
; GFX10-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC4]](s16), [[TRUNC5]](s16)
; GFX10-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.d.2d), 15, [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX10-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX10-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX10-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX11-LABEL: name: sample_c_d_2d
; GFX11: bb.1.main_body:
; GFX11-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6
; GFX11-NEXT: {{ $}}
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX11-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX11-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX11-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX11-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX11-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX11-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX11-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX11-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX11-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX11-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX11-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX11-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX11-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX11-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX11-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX11-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX11-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
; GFX11-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr5
; GFX11-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY17]](s32)
; GFX11-NEXT: [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr6
; GFX11-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[COPY18]](s32)
; GFX11-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX11-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX11-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
; GFX11-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC4]](s16), [[TRUNC5]](s16)
; GFX11-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.d.2d), 15, [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX11-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX11-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX11-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX11-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
main_body:
  %v = call <4 x float> @llvm.amdgcn.image.sample.c.d.2d.v4f32.f32.f16(i32 15, float %zcompare, half %dsdh, half %dtdh, half %dsdv, half %dtdv, half %s, half %t, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
  ret <4 x float> %v
}

2865 define amdgpu_ps <4 x float> @sample_d_cl_1d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, half %dsdh, half %dsdv, half %s, half %clamp) {
2866 ; GFX9-LABEL: name: sample_d_cl_1d
2867 ; GFX9: bb.1.main_body:
2868 ; GFX9-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
2870 ; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
2871 ; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
2872 ; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
2873 ; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
2874 ; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
2875 ; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
2876 ; GFX9-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
2877 ; GFX9-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
2878 ; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
2879 ; GFX9-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
2880 ; GFX9-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
2881 ; GFX9-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
2882 ; GFX9-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
2883 ; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
2884 ; GFX9-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
2885 ; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
2886 ; GFX9-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
2887 ; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
2888 ; GFX9-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
2889 ; GFX9-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
2890 ; GFX9-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
2891 ; GFX9-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
2892 ; GFX9-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
2893 ; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
2894 ; GFX9-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[DEF]](s16)
2895 ; GFX9-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
2896 ; GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<6 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>)
2897 ; GFX9-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.d.cl.1d), 15, [[CONCAT_VECTORS]](<6 x s16>), $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
2898 ; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
2899 ; GFX9-NEXT: $vgpr0 = COPY [[UV]](s32)
2900 ; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
2901 ; GFX9-NEXT: $vgpr2 = COPY [[UV2]](s32)
2902 ; GFX9-NEXT: $vgpr3 = COPY [[UV3]](s32)
2903 ; GFX9-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
; GFX10-LABEL: name: sample_d_cl_1d
; GFX10: bb.1.main_body:
; GFX10-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX10-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX10-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX10-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX10-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX10-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX10-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX10-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX10-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX10-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX10-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX10-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX10-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[DEF]](s16)
; GFX10-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
; GFX10-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.d.cl.1d), 15, [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX10-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX10-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX10-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX11-LABEL: name: sample_d_cl_1d
; GFX11: bb.1.main_body:
; GFX11-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
; GFX11-NEXT: {{ $}}
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX11-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX11-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX11-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX11-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX11-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX11-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX11-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX11-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX11-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX11-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX11-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX11-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX11-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX11-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX11-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX11-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX11-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX11-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX11-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[DEF]](s16)
; GFX11-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
; GFX11-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.d.cl.1d), 15, [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX11-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX11-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX11-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX11-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
main_body:
%v = call <4 x float> @llvm.amdgcn.image.sample.d.cl.1d.v4f32.f16.f16(i32 15, half %dsdh, half %dsdv, half %s, half %clamp, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
ret <4 x float> %v
}

define amdgpu_ps <4 x float> @sample_d_cl_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, half %dsdh, half %dtdh, half %dsdv, half %dtdv, half %s, half %t, half %clamp) {
; GFX9-LABEL: name: sample_d_cl_2d
; GFX9: bb.1.main_body:
; GFX9-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6
; GFX9-NEXT: {{ $}}
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX9-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX9-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX9-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX9-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX9-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX9-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX9-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX9-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX9-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX9-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX9-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX9-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX9-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX9-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
; GFX9-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr5
; GFX9-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[COPY17]](s32)
; GFX9-NEXT: [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr6
; GFX9-NEXT: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[COPY18]](s32)
; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX9-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
; GFX9-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC4]](s16), [[TRUNC5]](s16)
; GFX9-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX9-NEXT: [[BUILD_VECTOR5:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC6]](s16), [[DEF]](s16)
; GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), [[BUILD_VECTOR5]](<2 x s16>)
; GFX9-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.d.cl.2d), 15, [[CONCAT_VECTORS]](<8 x s16>), $noreg, $noreg, $noreg, $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX9-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX9-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX9-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX9-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX10-LABEL: name: sample_d_cl_2d
; GFX10: bb.1.main_body:
; GFX10-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX10-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX10-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX10-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX10-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX10-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX10-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX10-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX10-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX10-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX10-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX10-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX10-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
; GFX10-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr5
; GFX10-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[COPY17]](s32)
; GFX10-NEXT: [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr6
; GFX10-NEXT: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[COPY18]](s32)
; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX10-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
; GFX10-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC4]](s16), [[TRUNC5]](s16)
; GFX10-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX10-NEXT: [[BUILD_VECTOR5:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC6]](s16), [[DEF]](s16)
; GFX10-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.d.cl.2d), 15, [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), [[BUILD_VECTOR5]](<2 x s16>), $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX10-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX10-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX10-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX11-LABEL: name: sample_d_cl_2d
; GFX11: bb.1.main_body:
; GFX11-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6
; GFX11-NEXT: {{ $}}
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX11-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX11-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX11-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX11-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX11-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX11-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX11-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX11-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX11-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX11-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX11-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX11-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX11-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX11-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX11-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX11-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX11-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX11-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
; GFX11-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr5
; GFX11-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[COPY17]](s32)
; GFX11-NEXT: [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr6
; GFX11-NEXT: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[COPY18]](s32)
; GFX11-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX11-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
; GFX11-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC4]](s16), [[TRUNC5]](s16)
; GFX11-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX11-NEXT: [[BUILD_VECTOR5:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC6]](s16), [[DEF]](s16)
; GFX11-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.d.cl.2d), 15, [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), [[BUILD_VECTOR5]](<2 x s16>), $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX11-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX11-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX11-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX11-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
main_body:
%v = call <4 x float> @llvm.amdgcn.image.sample.d.cl.2d.v4f32.f16.f16(i32 15, half %dsdh, half %dtdh, half %dsdv, half %dtdv, half %s, half %t, half %clamp, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
ret <4 x float> %v
}

define amdgpu_ps <4 x float> @sample_c_d_cl_1d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %zcompare, half %dsdh, half %dsdv, half %s, half %clamp) {
; GFX9-LABEL: name: sample_c_d_cl_1d
; GFX9: bb.1.main_body:
; GFX9-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
; GFX9-NEXT: {{ $}}
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX9-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX9-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX9-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX9-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX9-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX9-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX9-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX9-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX9-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX9-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX9-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX9-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
; GFX9-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX9-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX9-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[DEF]](s16)
; GFX9-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
; GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>)
; GFX9-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.d.cl.1d), 15, [[CONCAT_VECTORS]](<8 x s16>), $noreg, $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX9-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX9-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX9-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX9-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX10-LABEL: name: sample_c_d_cl_1d
; GFX10: bb.1.main_body:
; GFX10-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX10-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX10-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX10-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX10-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX10-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX10-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX10-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX10-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX10-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX10-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX10-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
; GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX10-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX10-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[DEF]](s16)
; GFX10-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
; GFX10-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.d.cl.1d), 15, [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX10-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX10-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX10-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX11-LABEL: name: sample_c_d_cl_1d
; GFX11: bb.1.main_body:
; GFX11-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
; GFX11-NEXT: {{ $}}
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX11-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX11-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX11-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX11-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX11-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX11-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX11-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX11-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX11-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX11-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX11-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX11-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX11-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX11-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX11-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX11-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX11-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
; GFX11-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX11-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX11-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX11-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[DEF]](s16)
; GFX11-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
; GFX11-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.d.cl.1d), 15, [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX11-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX11-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX11-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX11-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
main_body:
%v = call <4 x float> @llvm.amdgcn.image.sample.c.d.cl.1d.v4f32.f32.f16(i32 15, float %zcompare, half %dsdh, half %dsdv, half %s, half %clamp, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
ret <4 x float> %v
}

define amdgpu_ps <4 x float> @sample_c_d_cl_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %zcompare, half %dsdh, half %dtdh, half %dsdv, half %dtdv, half %s, half %t, half %clamp) {
; GFX9-LABEL: name: sample_c_d_cl_2d
; GFX9: bb.1.main_body:
; GFX9-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7
; GFX9-NEXT: {{ $}}
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX9-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX9-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX9-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX9-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX9-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX9-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX9-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX9-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX9-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX9-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX9-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX9-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
; GFX9-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr5
; GFX9-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY17]](s32)
; GFX9-NEXT: [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr6
; GFX9-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[COPY18]](s32)
; GFX9-NEXT: [[COPY19:%[0-9]+]]:_(s32) = COPY $vgpr7
; GFX9-NEXT: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[COPY19]](s32)
; GFX9-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX9-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
; GFX9-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC4]](s16), [[TRUNC5]](s16)
; GFX9-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX9-NEXT: [[BUILD_VECTOR5:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC6]](s16), [[DEF]](s16)
; GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<10 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), [[BUILD_VECTOR5]](<2 x s16>)
; GFX9-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.d.cl.2d), 15, [[CONCAT_VECTORS]](<10 x s16>), $noreg, $noreg, $noreg, $noreg, $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX9-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX9-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX9-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX9-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX10-LABEL: name: sample_c_d_cl_2d
; GFX10: bb.1.main_body:
; GFX10-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX10-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX10-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX10-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX10-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX10-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX10-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX10-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX10-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX10-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX10-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX10-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
; GFX10-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr5
; GFX10-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY17]](s32)
; GFX10-NEXT: [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr6
; GFX10-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[COPY18]](s32)
; GFX10-NEXT: [[COPY19:%[0-9]+]]:_(s32) = COPY $vgpr7
; GFX10-NEXT: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[COPY19]](s32)
; GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX10-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
; GFX10-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC4]](s16), [[TRUNC5]](s16)
; GFX10-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX10-NEXT: [[BUILD_VECTOR5:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC6]](s16), [[DEF]](s16)
; GFX10-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.d.cl.2d), 15, [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), [[BUILD_VECTOR5]](<2 x s16>), $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX10-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX10-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX10-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX11-LABEL: name: sample_c_d_cl_2d
; GFX11: bb.1.main_body:
; GFX11-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7
; GFX11-NEXT: {{ $}}
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX11-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX11-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX11-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX11-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX11-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX11-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX11-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX11-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX11-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX11-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX11-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX11-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX11-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX11-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX11-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX11-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX11-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
; GFX11-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr5
; GFX11-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY17]](s32)
; GFX11-NEXT: [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr6
; GFX11-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[COPY18]](s32)
; GFX11-NEXT: [[COPY19:%[0-9]+]]:_(s32) = COPY $vgpr7
; GFX11-NEXT: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[COPY19]](s32)
; GFX11-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX11-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX11-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
; GFX11-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC4]](s16), [[TRUNC5]](s16)
; GFX11-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX11-NEXT: [[BUILD_VECTOR5:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC6]](s16), [[DEF]](s16)
; GFX11-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.d.cl.2d), 15, [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), [[BUILD_VECTOR5]](<2 x s16>), $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX11-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX11-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX11-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX11-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
main_body:
%v = call <4 x float> @llvm.amdgcn.image.sample.c.d.cl.2d.v4f32.f32.f16(i32 15, float %zcompare, half %dsdh, half %dtdh, half %dsdv, half %dtdv, half %s, half %t, half %clamp, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
ret <4 x float> %v
}

define amdgpu_ps <4 x float> @sample_cd_1d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, half %dsdh, half %dsdv, half %s) {
; GFX9-LABEL: name: sample_cd_1d
; GFX9: bb.1.main_body:
; GFX9-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
; GFX9-NEXT: {{ $}}
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX9-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX9-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX9-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX9-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX9-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX9-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX9-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX9-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX9-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX9-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX9-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX9-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[DEF]](s16)
; GFX9-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF]](s16)
; GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<6 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>)
; GFX9-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.cd.1d), 15, [[CONCAT_VECTORS]](<6 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX9-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX9-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX9-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX9-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX10-LABEL: name: sample_cd_1d
; GFX10: bb.1.main_body:
; GFX10-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX10-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX10-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX10-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX10-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX10-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX10-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX10-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX10-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX10-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX10-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[DEF]](s16)
; GFX10-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF]](s16)
; GFX10-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.cd.1d), 15, [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX10-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX10-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX10-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX11-LABEL: name: sample_cd_1d
; GFX11: bb.1.main_body:
; GFX11-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
; GFX11-NEXT: {{ $}}
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX11-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX11-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX11-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX11-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX11-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX11-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX11-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX11-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX11-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX11-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX11-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX11-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX11-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX11-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX11-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX11-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX11-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[DEF]](s16)
; GFX11-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF]](s16)
; GFX11-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.cd.1d), 15, [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX11-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX11-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX11-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX11-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
main_body:
%v = call <4 x float> @llvm.amdgcn.image.sample.cd.1d.v4f32.f16.f16(i32 15, half %dsdh, half %dsdv, half %s, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
ret <4 x float> %v
}

define amdgpu_ps <4 x float> @sample_cd_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, half %dsdh, half %dtdh, half %dsdv, half %dtdv, half %s, half %t) {
; GFX9-LABEL: name: sample_cd_2d
; GFX9: bb.1.main_body:
; GFX9-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
; GFX9-NEXT: {{ $}}
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX9-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX9-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX9-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX9-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX9-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX9-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX9-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX9-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX9-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX9-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX9-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX9-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX9-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX9-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
; GFX9-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr5
; GFX9-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[COPY17]](s32)
; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX9-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
; GFX9-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC4]](s16), [[TRUNC5]](s16)
; GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<6 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>)
; GFX9-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.cd.2d), 15, [[CONCAT_VECTORS]](<6 x s16>), $noreg, $noreg, $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX9-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX9-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX9-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX9-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX10-LABEL: name: sample_cd_2d
; GFX10: bb.1.main_body:
; GFX10-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX10-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX10-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX10-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX10-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX10-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX10-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX10-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX10-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX10-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX10-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX10-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX10-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
; GFX10-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr5
; GFX10-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[COPY17]](s32)
; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX10-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
; GFX10-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC4]](s16), [[TRUNC5]](s16)
; GFX10-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.cd.2d), 15, [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX10-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX10-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX10-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX11-LABEL: name: sample_cd_2d
; GFX11: bb.1.main_body:
; GFX11-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
; GFX11-NEXT: {{ $}}
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX11-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX11-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX11-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX11-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX11-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX11-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX11-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX11-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX11-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX11-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX11-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX11-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX11-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX11-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX11-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX11-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX11-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX11-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
; GFX11-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr5
; GFX11-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[COPY17]](s32)
; GFX11-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX11-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
; GFX11-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC4]](s16), [[TRUNC5]](s16)
; GFX11-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.cd.2d), 15, [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX11-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX11-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX11-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX11-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
main_body:
%v = call <4 x float> @llvm.amdgcn.image.sample.cd.2d.v4f32.f16.f16(i32 15, half %dsdh, half %dtdh, half %dsdv, half %dtdv, half %s, half %t, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
ret <4 x float> %v
}

define amdgpu_ps <4 x float> @sample_c_cd_1d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %zcompare, half %dsdh, half %dsdv, half %s) {
; GFX9-LABEL: name: sample_c_cd_1d
; GFX9: bb.1.main_body:
; GFX9-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
; GFX9-NEXT: {{ $}}
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX9-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX9-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX9-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX9-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX9-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX9-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX9-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX9-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX9-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX9-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX9-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX9-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX9-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[DEF]](s16)
; GFX9-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF]](s16)
; GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>)
; GFX9-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.cd.1d), 15, [[CONCAT_VECTORS]](<8 x s16>), $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX9-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX9-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX9-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX9-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
;
; GFX10-LABEL: name: sample_c_cd_1d
; GFX10: bb.1.main_body:
; GFX10-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX10-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX10-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX10-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX10-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX10-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX10-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX10-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX10-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX10-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX10-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX10-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[DEF]](s16)
; GFX10-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF]](s16)
; GFX10-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.cd.1d), 15, [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX10-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX10-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX10-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX11-LABEL: name: sample_c_cd_1d
; GFX11: bb.1.main_body:
; GFX11-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
; GFX11-NEXT: {{ $}}
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX11-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX11-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX11-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX11-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX11-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX11-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX11-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX11-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX11-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX11-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX11-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX11-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX11-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX11-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX11-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX11-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX11-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX11-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX11-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[DEF]](s16)
; GFX11-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF]](s16)
; GFX11-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.cd.1d), 15, [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX11-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX11-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX11-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX11-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
main_body:
  %v = call <4 x float> @llvm.amdgcn.image.sample.c.cd.1d.v4f32.f32.f16(i32 15, float %zcompare, half %dsdh, half %dsdv, half %s, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
  ret <4 x float> %v
}

define amdgpu_ps <4 x float> @sample_c_cd_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %zcompare, half %dsdh, half %dtdh, half %dsdv, half %dtdv, half %s, half %t) {
; GFX9-LABEL: name: sample_c_cd_2d
; GFX9: bb.1.main_body:
; GFX9-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6
; GFX9-NEXT: {{ $}}
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX9-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX9-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX9-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX9-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX9-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX9-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX9-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX9-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX9-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX9-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX9-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX9-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
; GFX9-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr5
; GFX9-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY17]](s32)
; GFX9-NEXT: [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr6
; GFX9-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[COPY18]](s32)
; GFX9-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX9-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
; GFX9-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC4]](s16), [[TRUNC5]](s16)
; GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>)
; GFX9-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.cd.2d), 15, [[CONCAT_VECTORS]](<8 x s16>), $noreg, $noreg, $noreg, $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX9-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX9-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX9-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX9-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX10-LABEL: name: sample_c_cd_2d
; GFX10: bb.1.main_body:
; GFX10-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX10-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX10-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX10-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX10-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX10-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX10-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX10-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX10-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX10-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX10-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX10-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
; GFX10-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr5
; GFX10-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY17]](s32)
; GFX10-NEXT: [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr6
; GFX10-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[COPY18]](s32)
; GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX10-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
; GFX10-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC4]](s16), [[TRUNC5]](s16)
; GFX10-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.cd.2d), 15, [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX10-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX10-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX10-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX11-LABEL: name: sample_c_cd_2d
; GFX11: bb.1.main_body:
; GFX11-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6
; GFX11-NEXT: {{ $}}
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX11-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX11-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX11-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX11-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX11-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX11-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX11-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX11-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX11-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX11-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX11-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX11-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX11-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX11-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX11-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX11-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX11-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
; GFX11-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr5
; GFX11-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY17]](s32)
; GFX11-NEXT: [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr6
; GFX11-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[COPY18]](s32)
; GFX11-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX11-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX11-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
; GFX11-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC4]](s16), [[TRUNC5]](s16)
; GFX11-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.cd.2d), 15, [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX11-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX11-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX11-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX11-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
main_body:
  %v = call <4 x float> @llvm.amdgcn.image.sample.c.cd.2d.v4f32.f32.f16(i32 15, float %zcompare, half %dsdh, half %dtdh, half %dsdv, half %dtdv, half %s, half %t, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
  ret <4 x float> %v
}

define amdgpu_ps <4 x float> @sample_cd_cl_1d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, half %dsdh, half %dsdv, half %s, half %clamp) {
; GFX9-LABEL: name: sample_cd_cl_1d
; GFX9: bb.1.main_body:
; GFX9-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
; GFX9-NEXT: {{ $}}
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX9-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX9-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX9-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX9-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX9-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX9-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX9-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX9-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX9-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX9-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX9-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX9-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX9-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX9-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[DEF]](s16)
; GFX9-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
; GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<6 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>)
; GFX9-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.cd.cl.1d), 15, [[CONCAT_VECTORS]](<6 x s16>), $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX9-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX9-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX9-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX9-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX10-LABEL: name: sample_cd_cl_1d
; GFX10: bb.1.main_body:
; GFX10-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX10-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX10-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX10-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX10-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX10-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX10-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX10-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX10-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX10-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX10-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX10-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX10-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[DEF]](s16)
; GFX10-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
; GFX10-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.cd.cl.1d), 15, [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX10-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX10-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX10-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX11-LABEL: name: sample_cd_cl_1d
; GFX11: bb.1.main_body:
; GFX11-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
; GFX11-NEXT: {{ $}}
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX11-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX11-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX11-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX11-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX11-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX11-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX11-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX11-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX11-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX11-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX11-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX11-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX11-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX11-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX11-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX11-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX11-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX11-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX11-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[DEF]](s16)
; GFX11-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
; GFX11-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.cd.cl.1d), 15, [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX11-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX11-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX11-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX11-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
main_body:
  %v = call <4 x float> @llvm.amdgcn.image.sample.cd.cl.1d.v4f32.f16.f16(i32 15, half %dsdh, half %dsdv, half %s, half %clamp, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
  ret <4 x float> %v
}

define amdgpu_ps <4 x float> @sample_cd_cl_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, half %dsdh, half %dtdh, half %dsdv, half %dtdv, half %s, half %t, half %clamp) {
; GFX9-LABEL: name: sample_cd_cl_2d
; GFX9: bb.1.main_body:
; GFX9-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6
; GFX9-NEXT: {{ $}}
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX9-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX9-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX9-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX9-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX9-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX9-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX9-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX9-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX9-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX9-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX9-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX9-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX9-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX9-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
; GFX9-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr5
; GFX9-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[COPY17]](s32)
; GFX9-NEXT: [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr6
; GFX9-NEXT: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[COPY18]](s32)
; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX9-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
; GFX9-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC4]](s16), [[TRUNC5]](s16)
; GFX9-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX9-NEXT: [[BUILD_VECTOR5:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC6]](s16), [[DEF]](s16)
; GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), [[BUILD_VECTOR5]](<2 x s16>)
; GFX9-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.cd.cl.2d), 15, [[CONCAT_VECTORS]](<8 x s16>), $noreg, $noreg, $noreg, $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX9-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX9-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX9-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX9-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX10-LABEL: name: sample_cd_cl_2d
; GFX10: bb.1.main_body:
; GFX10-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX10-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX10-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX10-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX10-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX10-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX10-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX10-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX10-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX10-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX10-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX10-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX10-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
; GFX10-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr5
; GFX10-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[COPY17]](s32)
; GFX10-NEXT: [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr6
; GFX10-NEXT: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[COPY18]](s32)
; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX10-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
; GFX10-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC4]](s16), [[TRUNC5]](s16)
; GFX10-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX10-NEXT: [[BUILD_VECTOR5:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC6]](s16), [[DEF]](s16)
; GFX10-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.cd.cl.2d), 15, [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), [[BUILD_VECTOR5]](<2 x s16>), $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX10-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX10-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX10-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX11-LABEL: name: sample_cd_cl_2d
; GFX11: bb.1.main_body:
; GFX11-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6
; GFX11-NEXT: {{ $}}
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX11-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX11-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX11-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX11-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX11-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX11-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX11-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX11-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX11-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX11-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX11-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX11-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX11-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX11-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX11-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX11-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX11-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX11-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
; GFX11-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr5
; GFX11-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[COPY17]](s32)
; GFX11-NEXT: [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr6
; GFX11-NEXT: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[COPY18]](s32)
; GFX11-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX11-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
; GFX11-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC4]](s16), [[TRUNC5]](s16)
; GFX11-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX11-NEXT: [[BUILD_VECTOR5:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC6]](s16), [[DEF]](s16)
; GFX11-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.cd.cl.2d), 15, [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), [[BUILD_VECTOR5]](<2 x s16>), $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX11-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX11-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX11-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX11-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
main_body:
  %v = call <4 x float> @llvm.amdgcn.image.sample.cd.cl.2d.v4f32.f16.f16(i32 15, half %dsdh, half %dtdh, half %dsdv, half %dtdv, half %s, half %t, half %clamp, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
  ret <4 x float> %v
}

define amdgpu_ps <4 x float> @sample_c_cd_cl_1d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %zcompare, half %dsdh, half %dsdv, half %s, half %clamp) {
; GFX9-LABEL: name: sample_c_cd_cl_1d
; GFX9: bb.1.main_body:
; GFX9-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
; GFX9-NEXT: {{ $}}
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX9-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX9-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX9-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX9-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX9-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX9-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX9-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX9-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX9-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX9-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX9-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX9-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
; GFX9-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX9-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX9-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[DEF]](s16)
; GFX9-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
; GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>)
; GFX9-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.cd.cl.1d), 15, [[CONCAT_VECTORS]](<8 x s16>), $noreg, $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX9-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX9-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX9-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX9-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX10-LABEL: name: sample_c_cd_cl_1d
; GFX10: bb.1.main_body:
; GFX10-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX10-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX10-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX10-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX10-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX10-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX10-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX10-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX10-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX10-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX10-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX10-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
; GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX10-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX10-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[DEF]](s16)
; GFX10-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
; GFX10-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.cd.cl.1d), 15, [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX10-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX10-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX10-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX11-LABEL: name: sample_c_cd_cl_1d
; GFX11: bb.1.main_body:
; GFX11-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
; GFX11-NEXT: {{ $}}
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX11-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX11-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX11-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX11-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX11-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX11-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX11-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX11-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX11-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX11-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX11-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX11-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX11-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX11-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX11-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX11-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX11-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
; GFX11-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX11-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX11-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX11-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[DEF]](s16)
; GFX11-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
; GFX11-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.cd.cl.1d), 15, [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX11-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX11-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX11-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX11-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
main_body:
  %v = call <4 x float> @llvm.amdgcn.image.sample.c.cd.cl.1d.v4f32.f32.f16(i32 15, float %zcompare, half %dsdh, half %dsdv, half %s, half %clamp, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
  ret <4 x float> %v
}

define amdgpu_ps <4 x float> @sample_c_cd_cl_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %zcompare, half %dsdh, half %dtdh, half %dsdv, half %dtdv, half %s, half %t, half %clamp) {
; GFX9-LABEL: name: sample_c_cd_cl_2d
; GFX9: bb.1.main_body:
; GFX9-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7
; GFX9-NEXT: {{ $}}
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX9-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX9-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX9-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX9-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX9-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX9-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX9-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX9-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX9-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX9-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX9-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX9-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
; GFX9-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr5
; GFX9-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY17]](s32)
; GFX9-NEXT: [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr6
; GFX9-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[COPY18]](s32)
; GFX9-NEXT: [[COPY19:%[0-9]+]]:_(s32) = COPY $vgpr7
; GFX9-NEXT: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[COPY19]](s32)
; GFX9-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX9-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
; GFX9-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC4]](s16), [[TRUNC5]](s16)
; GFX9-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX9-NEXT: [[BUILD_VECTOR5:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC6]](s16), [[DEF]](s16)
; GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<10 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), [[BUILD_VECTOR5]](<2 x s16>)
; GFX9-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.cd.cl.2d), 15, [[CONCAT_VECTORS]](<10 x s16>), $noreg, $noreg, $noreg, $noreg, $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX9-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX9-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX9-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX9-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX10-LABEL: name: sample_c_cd_cl_2d
; GFX10: bb.1.main_body:
; GFX10-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX10-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX10-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX10-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX10-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX10-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX10-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX10-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX10-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX10-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX10-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX10-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
; GFX10-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr5
; GFX10-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY17]](s32)
; GFX10-NEXT: [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr6
; GFX10-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[COPY18]](s32)
; GFX10-NEXT: [[COPY19:%[0-9]+]]:_(s32) = COPY $vgpr7
; GFX10-NEXT: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[COPY19]](s32)
; GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX10-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
; GFX10-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC4]](s16), [[TRUNC5]](s16)
; GFX10-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX10-NEXT: [[BUILD_VECTOR5:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC6]](s16), [[DEF]](s16)
; GFX10-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.cd.cl.2d), 15, [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), [[BUILD_VECTOR5]](<2 x s16>), $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX10-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX10-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX10-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
4380 ; GFX11-LABEL: name: sample_c_cd_cl_2d
4381 ; GFX11: bb.1.main_body:
4382 ; GFX11-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7
4383 ; GFX11-NEXT: {{ $}}
4384 ; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
4385 ; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
4386 ; GFX11-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
4387 ; GFX11-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
4388 ; GFX11-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
4389 ; GFX11-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
4390 ; GFX11-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
4391 ; GFX11-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
4392 ; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
4393 ; GFX11-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
4394 ; GFX11-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
4395 ; GFX11-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
4396 ; GFX11-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
4397 ; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
4398 ; GFX11-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
4399 ; GFX11-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
4400 ; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
4401 ; GFX11-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
4402 ; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
4403 ; GFX11-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
4404 ; GFX11-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
4405 ; GFX11-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
4406 ; GFX11-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
4407 ; GFX11-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr5
4408 ; GFX11-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY17]](s32)
4409 ; GFX11-NEXT: [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr6
4410 ; GFX11-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[COPY18]](s32)
4411 ; GFX11-NEXT: [[COPY19:%[0-9]+]]:_(s32) = COPY $vgpr7
4412 ; GFX11-NEXT: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[COPY19]](s32)
4413 ; GFX11-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
4414 ; GFX11-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
4415 ; GFX11-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
4416 ; GFX11-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC4]](s16), [[TRUNC5]](s16)
4417 ; GFX11-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
4418 ; GFX11-NEXT: [[BUILD_VECTOR5:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC6]](s16), [[DEF]](s16)
4419 ; GFX11-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.cd.cl.2d), 15, [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), [[BUILD_VECTOR5]](<2 x s16>), $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
4420 ; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
4421 ; GFX11-NEXT: $vgpr0 = COPY [[UV]](s32)
4422 ; GFX11-NEXT: $vgpr1 = COPY [[UV1]](s32)
4423 ; GFX11-NEXT: $vgpr2 = COPY [[UV2]](s32)
4424 ; GFX11-NEXT: $vgpr3 = COPY [[UV3]](s32)
4425 ; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
%v = call <4 x float> @llvm.amdgcn.image.sample.c.cd.cl.2d.v4f32.f16.f16(i32 15, float %zcompare, half %dsdh, half %dtdh, half %dsdv, half %dtdv, half %s, half %t, half %clamp, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
ret <4 x float> %v
}

define amdgpu_ps <4 x float> @sample_l_1d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, half %s, half %lod) {
; GFX9-LABEL: name: sample_l_1d
; GFX9: bb.1.main_body:
; GFX9-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1
; GFX9-NEXT: {{ $}}
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX9-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX9-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX9-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX9-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX9-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX9-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX9-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX9-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX9-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.l.1d), 15, [[BUILD_VECTOR2]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX9-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX9-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX9-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX9-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX10-LABEL: name: sample_l_1d
; GFX10: bb.1.main_body:
; GFX10-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX10-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX10-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX10-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX10-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX10-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX10-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX10-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.l.1d), 15, [[BUILD_VECTOR2]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX10-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX10-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX10-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX11-LABEL: name: sample_l_1d
; GFX11: bb.1.main_body:
; GFX11-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1
; GFX11-NEXT: {{ $}}
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX11-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX11-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX11-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX11-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX11-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX11-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX11-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX11-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX11-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX11-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX11-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX11-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX11-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX11-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.l.1d), 15, [[BUILD_VECTOR2]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX11-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX11-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX11-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX11-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
main_body:
%v = call <4 x float> @llvm.amdgcn.image.sample.l.1d.v4f32.f16(i32 15, half %s, half %lod, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
ret <4 x float> %v
}

define amdgpu_ps <4 x float> @sample_l_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, half %s, half %t, half %lod) {
; GFX9-LABEL: name: sample_l_2d
; GFX9: bb.1.main_body:
; GFX9-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
; GFX9-NEXT: {{ $}}
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX9-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX9-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX9-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX9-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX9-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX9-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX9-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX9-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX9-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX9-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX9-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX9-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF]](s16)
; GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>)
; GFX9-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.l.2d), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX9-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX9-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX9-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX9-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX10-LABEL: name: sample_l_2d
; GFX10: bb.1.main_body:
; GFX10-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX10-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX10-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX10-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX10-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX10-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX10-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX10-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX10-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX10-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX10-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF]](s16)
; GFX10-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>)
; GFX10-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.l.2d), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX10-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX10-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX10-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX11-LABEL: name: sample_l_2d
; GFX11: bb.1.main_body:
; GFX11-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
; GFX11-NEXT: {{ $}}
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX11-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX11-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX11-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX11-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX11-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX11-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX11-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX11-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX11-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX11-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX11-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX11-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX11-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX11-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX11-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX11-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX11-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF]](s16)
; GFX11-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>)
; GFX11-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.l.2d), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX11-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX11-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX11-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX11-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
main_body:
%v = call <4 x float> @llvm.amdgcn.image.sample.l.2d.v4f32.f16(i32 15, half %s, half %t, half %lod, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
ret <4 x float> %v
}

define amdgpu_ps <4 x float> @sample_c_l_1d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %zcompare, half %s, half %lod) {
; GFX9-LABEL: name: sample_c_l_1d
; GFX9: bb.1.main_body:
; GFX9-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
; GFX9-NEXT: {{ $}}
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX9-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX9-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX9-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX9-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX9-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX9-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX9-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX9-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX9-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>)
; GFX9-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.l.1d), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX9-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX9-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX9-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX9-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX10-LABEL: name: sample_c_l_1d
; GFX10: bb.1.main_body:
; GFX10-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX10-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX10-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX10-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX10-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX10-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX10-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX10-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX10-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>)
; GFX10-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.l.1d), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX10-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX10-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX10-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX11-LABEL: name: sample_c_l_1d
; GFX11: bb.1.main_body:
; GFX11-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
; GFX11-NEXT: {{ $}}
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX11-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX11-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX11-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX11-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX11-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX11-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX11-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX11-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX11-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX11-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX11-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX11-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX11-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX11-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX11-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX11-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>)
; GFX11-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.l.1d), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX11-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX11-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX11-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX11-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
main_body:
%v = call <4 x float> @llvm.amdgcn.image.sample.c.l.1d.v4f32.f16(i32 15, float %zcompare, half %s, half %lod, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
ret <4 x float> %v
}

define amdgpu_ps <4 x float> @sample_c_l_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %zcompare, half %s, half %t, half %lod) {
; GFX9-LABEL: name: sample_c_l_2d
; GFX9: bb.1.main_body:
; GFX9-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
; GFX9-NEXT: {{ $}}
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX9-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX9-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX9-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX9-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX9-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX9-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX9-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX9-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX9-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX9-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX9-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX9-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX9-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF]](s16)
; GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<6 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>)
; GFX9-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.l.2d), 15, [[CONCAT_VECTORS]](<6 x s16>), $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX9-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX9-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX9-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX9-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX10-LABEL: name: sample_c_l_2d
; GFX10: bb.1.main_body:
; GFX10-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX10-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX10-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX10-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX10-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX10-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX10-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX10-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX10-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX10-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX10-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX10-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF]](s16)
; GFX10-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.l.2d), 15, [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX10-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX10-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX10-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX11-LABEL: name: sample_c_l_2d
; GFX11: bb.1.main_body:
; GFX11-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
; GFX11-NEXT: {{ $}}
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX11-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX11-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX11-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX11-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX11-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX11-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX11-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX11-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX11-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX11-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX11-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX11-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX11-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX11-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX11-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX11-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX11-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX11-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX11-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF]](s16)
; GFX11-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.l.2d), 15, [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX11-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX11-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX11-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX11-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
main_body:
%v = call <4 x float> @llvm.amdgcn.image.sample.c.l.2d.v4f32.f16(i32 15, float %zcompare, half %s, half %t, half %lod, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
ret <4 x float> %v
}

define amdgpu_ps <4 x float> @sample_lz_1d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, half %s) {
; GFX9-LABEL: name: sample_lz_1d
; GFX9: bb.1.main_body:
; GFX9-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0
; GFX9-NEXT: {{ $}}
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX9-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX9-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX9-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX9-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX9-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX9-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX9-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX9-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX9-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.lz.1d), 15, [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX9-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX9-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX9-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX9-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX10-LABEL: name: sample_lz_1d
; GFX10: bb.1.main_body:
; GFX10-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX10-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX10-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX10-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX10-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX10-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX10-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX10-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.lz.1d), 15, [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX10-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX10-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX10-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX11-LABEL: name: sample_lz_1d
; GFX11: bb.1.main_body:
; GFX11-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0
; GFX11-NEXT: {{ $}}
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX11-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX11-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX11-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX11-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX11-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX11-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX11-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX11-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX11-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX11-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX11-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX11-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX11-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX11-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.lz.1d), 15, [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX11-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX11-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX11-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX11-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
main_body:
%v = call <4 x float> @llvm.amdgcn.image.sample.lz.1d.v4f32.f16(i32 15, half %s, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
ret <4 x float> %v
}

define amdgpu_ps <4 x float> @sample_lz_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, half %s, half %t) {
; GFX9-LABEL: name: sample_lz_2d
; GFX9: bb.1.main_body:
; GFX9-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1
; GFX9-NEXT: {{ $}}
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX9-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX9-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX9-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX9-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX9-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX9-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX9-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX9-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX9-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.lz.2d), 15, [[BUILD_VECTOR2]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX9-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX9-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX9-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX9-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX10-LABEL: name: sample_lz_2d
; GFX10: bb.1.main_body:
; GFX10-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX10-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX10-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX10-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX10-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX10-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX10-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX10-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.lz.2d), 15, [[BUILD_VECTOR2]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX10-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX10-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX10-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX11-LABEL: name: sample_lz_2d
; GFX11: bb.1.main_body:
; GFX11-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1
; GFX11-NEXT: {{ $}}
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX11-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX11-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX11-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX11-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX11-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX11-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX11-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX11-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX11-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX11-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX11-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; GFX11-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX11-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX11-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.lz.2d), 15, [[BUILD_VECTOR2]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX11-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX11-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX11-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX11-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
main_body:
%v = call <4 x float> @llvm.amdgcn.image.sample.lz.2d.v4f32.f16(i32 15, half %s, half %t, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
ret <4 x float> %v
}

define amdgpu_ps <4 x float> @sample_c_lz_1d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %zcompare, half %s) {
; GFX9-LABEL: name: sample_c_lz_1d
; GFX9: bb.1.main_body:
; GFX9-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1
; GFX9-NEXT: {{ $}}
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX9-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX9-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX9-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX9-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX9-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX9-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX9-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX9-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX9-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>)
; GFX9-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.lz.1d), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX9-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX9-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX9-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX9-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX10-LABEL: name: sample_c_lz_1d
; GFX10: bb.1.main_body:
; GFX10-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX10-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX10-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX10-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX10-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX10-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX10-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX10-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX10-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>)
; GFX10-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.lz.1d), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX10-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX10-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX10-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX11-LABEL: name: sample_c_lz_1d
; GFX11: bb.1.main_body:
; GFX11-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1
; GFX11-NEXT: {{ $}}
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX11-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX11-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX11-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX11-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX11-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX11-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX11-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX11-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX11-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX11-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX11-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX11-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX11-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX11-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX11-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[DEF]](s16)
; GFX11-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>)
; GFX11-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.lz.1d), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
5152 ; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
5153 ; GFX11-NEXT: $vgpr0 = COPY [[UV]](s32)
5154 ; GFX11-NEXT: $vgpr1 = COPY [[UV1]](s32)
5155 ; GFX11-NEXT: $vgpr2 = COPY [[UV2]](s32)
5156 ; GFX11-NEXT: $vgpr3 = COPY [[UV3]](s32)
5157 ; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
5159 %v = call <4 x float> @llvm.amdgcn.image.sample.c.lz.1d.v4f32.f16(i32 15, float %zcompare, half %s, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
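; Same as sample_c_lz_1d but with two f16 coordinates: %s and %t fill one
; <2 x s16> G_BUILD_VECTOR, so no G_IMPLICIT_DEF padding element is needed.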
define amdgpu_ps <4 x float> @sample_c_lz_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %zcompare, half %s, half %t) {
; GFX9-LABEL: name: sample_c_lz_2d
; GFX9: bb.1.main_body:
; GFX9-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
; GFX9-NEXT: {{ $}}
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX9-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX9-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX9-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX9-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX9-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX9-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX9-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX9-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX9-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>)
; GFX9-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.lz.2d), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX9-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX9-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX9-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX9-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX10-LABEL: name: sample_c_lz_2d
; GFX10: bb.1.main_body:
; GFX10-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX10-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX10-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX10-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX10-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX10-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX10-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX10-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX10-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>)
; GFX10-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.lz.2d), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX10-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX10-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX10-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
; GFX11-LABEL: name: sample_c_lz_2d
; GFX11: bb.1.main_body:
; GFX11-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
; GFX11-NEXT: {{ $}}
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX11-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX11-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX11-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX11-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX11-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX11-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX11-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX11-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX11-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX11-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX11-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX11-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
; GFX11-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX11-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX11-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX11-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>)
; GFX11-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.lz.2d), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 1 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX11-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX11-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX11-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX11-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
main_body:
  %v = call <4 x float> @llvm.amdgcn.image.sample.c.lz.2d.v4f32.f16(i32 15, float %zcompare, half %s, half %t, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
  ret <4 x float> %v
}

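; Derivative sampling with offset into a 2-D array. The i32 %offset and f32
; %zcompare each occupy a full dword (bitcast to <2 x s16>), the six f16
; gradient and coordinate operands pack in pairs, and the odd %slice is padded
; with a G_IMPLICIT_DEF element. GFX9/GFX10 concatenate all six dwords into a
; single <12 x s16> operand, while GFX11 passes the first four dwords as
; separate operands and only concatenates the trailing pair, presumably to
; match its partial-NSA addressing.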
define amdgpu_ps float @sample_c_d_o_2darray_V1(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, i32 %offset, float %zcompare, half %dsdh, half %dtdh, half %dsdv, half %dtdv, half %s, half %t, half %slice) {
; GFX9-LABEL: name: sample_c_d_o_2darray_V1
; GFX9: bb.1.main_body:
; GFX9-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8
; GFX9-NEXT: {{ $}}
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX9-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX9-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX9-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX9-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX9-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX9-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX9-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX9-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX9-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX9-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX9-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
; GFX9-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr5
; GFX9-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY17]](s32)
; GFX9-NEXT: [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr6
; GFX9-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY18]](s32)
; GFX9-NEXT: [[COPY19:%[0-9]+]]:_(s32) = COPY $vgpr7
; GFX9-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[COPY19]](s32)
; GFX9-NEXT: [[COPY20:%[0-9]+]]:_(s32) = COPY $vgpr8
; GFX9-NEXT: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[COPY20]](s32)
; GFX9-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX9-NEXT: [[BITCAST1:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY13]](s32)
; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX9-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
; GFX9-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC4]](s16), [[TRUNC5]](s16)
; GFX9-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX9-NEXT: [[BUILD_VECTOR5:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC6]](s16), [[DEF]](s16)
; GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<12 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BITCAST1]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), [[BUILD_VECTOR5]](<2 x s16>)
; GFX9-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(s32) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.d.o.2darray), 4, [[CONCAT_VECTORS]](<12 x s16>), $noreg, $noreg, $noreg, $noreg, $noreg, $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (s32), addrspace 8)
; GFX9-NEXT: $vgpr0 = COPY [[AMDGPU_INTRIN_IMAGE_LOAD]](s32)
; GFX9-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
;
; GFX10-LABEL: name: sample_c_d_o_2darray_V1
; GFX10: bb.1.main_body:
; GFX10-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX10-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX10-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX10-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX10-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX10-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX10-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX10-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX10-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX10-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX10-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
; GFX10-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr5
; GFX10-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY17]](s32)
; GFX10-NEXT: [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr6
; GFX10-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY18]](s32)
; GFX10-NEXT: [[COPY19:%[0-9]+]]:_(s32) = COPY $vgpr7
; GFX10-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[COPY19]](s32)
; GFX10-NEXT: [[COPY20:%[0-9]+]]:_(s32) = COPY $vgpr8
; GFX10-NEXT: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[COPY20]](s32)
; GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX10-NEXT: [[BITCAST1:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY13]](s32)
; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX10-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
; GFX10-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC4]](s16), [[TRUNC5]](s16)
; GFX10-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX10-NEXT: [[BUILD_VECTOR5:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC6]](s16), [[DEF]](s16)
; GFX10-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<12 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BITCAST1]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), [[BUILD_VECTOR5]](<2 x s16>)
; GFX10-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(s32) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.d.o.2darray), 4, [[CONCAT_VECTORS]](<12 x s16>), $noreg, $noreg, $noreg, $noreg, $noreg, $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (s32), addrspace 8)
; GFX10-NEXT: $vgpr0 = COPY [[AMDGPU_INTRIN_IMAGE_LOAD]](s32)
; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
;
; GFX11-LABEL: name: sample_c_d_o_2darray_V1
; GFX11: bb.1.main_body:
; GFX11-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8
; GFX11-NEXT: {{ $}}
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX11-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX11-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX11-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX11-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX11-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX11-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX11-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX11-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX11-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX11-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX11-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX11-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX11-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX11-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX11-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX11-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
; GFX11-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr5
; GFX11-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY17]](s32)
; GFX11-NEXT: [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr6
; GFX11-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY18]](s32)
; GFX11-NEXT: [[COPY19:%[0-9]+]]:_(s32) = COPY $vgpr7
; GFX11-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[COPY19]](s32)
; GFX11-NEXT: [[COPY20:%[0-9]+]]:_(s32) = COPY $vgpr8
; GFX11-NEXT: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[COPY20]](s32)
; GFX11-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX11-NEXT: [[BITCAST1:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY13]](s32)
; GFX11-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX11-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
; GFX11-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC4]](s16), [[TRUNC5]](s16)
; GFX11-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX11-NEXT: [[BUILD_VECTOR5:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC6]](s16), [[DEF]](s16)
; GFX11-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR4]](<2 x s16>), [[BUILD_VECTOR5]](<2 x s16>)
; GFX11-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(s32) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.d.o.2darray), 4, [[BITCAST]](<2 x s16>), [[BITCAST1]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (s32), addrspace 8)
; GFX11-NEXT: $vgpr0 = COPY [[AMDGPU_INTRIN_IMAGE_LOAD]](s32)
; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
main_body:
  %v = call float @llvm.amdgcn.image.sample.c.d.o.2darray.f32.f16.f16(i32 4, i32 %offset, float %zcompare, half %dsdh, half %dtdh, half %dsdv, half %dtdv, half %s, half %t, half %slice, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
  ret float %v
}

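; Two-channel variant of the previous test: identical operand packing, but the
; dmask is 6 and the <2 x s32> result is unmerged into two return VGPRs.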
define amdgpu_ps <2 x float> @sample_c_d_o_2darray_V2(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, i32 %offset, float %zcompare, half %dsdh, half %dtdh, half %dsdv, half %dtdv, half %s, half %t, half %slice) {
; GFX9-LABEL: name: sample_c_d_o_2darray_V2
; GFX9: bb.1.main_body:
; GFX9-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8
; GFX9-NEXT: {{ $}}
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX9-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX9-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX9-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX9-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX9-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX9-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX9-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX9-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX9-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX9-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX9-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
; GFX9-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr5
; GFX9-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY17]](s32)
; GFX9-NEXT: [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr6
; GFX9-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY18]](s32)
; GFX9-NEXT: [[COPY19:%[0-9]+]]:_(s32) = COPY $vgpr7
; GFX9-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[COPY19]](s32)
; GFX9-NEXT: [[COPY20:%[0-9]+]]:_(s32) = COPY $vgpr8
; GFX9-NEXT: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[COPY20]](s32)
; GFX9-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX9-NEXT: [[BITCAST1:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY13]](s32)
; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX9-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
; GFX9-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC4]](s16), [[TRUNC5]](s16)
; GFX9-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX9-NEXT: [[BUILD_VECTOR5:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC6]](s16), [[DEF]](s16)
; GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<12 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BITCAST1]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), [[BUILD_VECTOR5]](<2 x s16>)
; GFX9-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<2 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.d.o.2darray), 6, [[CONCAT_VECTORS]](<12 x s16>), $noreg, $noreg, $noreg, $noreg, $noreg, $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<2 x s32>), addrspace 8)
; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<2 x s32>)
; GFX9-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX9-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1
;
; GFX10-LABEL: name: sample_c_d_o_2darray_V2
; GFX10: bb.1.main_body:
; GFX10-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX10-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX10-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX10-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX10-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX10-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX10-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX10-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX10-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX10-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX10-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
; GFX10-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr5
; GFX10-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY17]](s32)
; GFX10-NEXT: [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr6
; GFX10-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY18]](s32)
; GFX10-NEXT: [[COPY19:%[0-9]+]]:_(s32) = COPY $vgpr7
; GFX10-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[COPY19]](s32)
; GFX10-NEXT: [[COPY20:%[0-9]+]]:_(s32) = COPY $vgpr8
; GFX10-NEXT: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[COPY20]](s32)
; GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX10-NEXT: [[BITCAST1:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY13]](s32)
; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX10-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
; GFX10-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC4]](s16), [[TRUNC5]](s16)
; GFX10-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX10-NEXT: [[BUILD_VECTOR5:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC6]](s16), [[DEF]](s16)
; GFX10-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<12 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BITCAST1]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[BUILD_VECTOR4]](<2 x s16>), [[BUILD_VECTOR5]](<2 x s16>)
; GFX10-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<2 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.d.o.2darray), 6, [[CONCAT_VECTORS]](<12 x s16>), $noreg, $noreg, $noreg, $noreg, $noreg, $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<2 x s32>), addrspace 8)
; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<2 x s32>)
; GFX10-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1
;
; GFX11-LABEL: name: sample_c_d_o_2darray_V2
; GFX11: bb.1.main_body:
; GFX11-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8
; GFX11-NEXT: {{ $}}
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX11-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX11-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX11-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX11-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX11-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX11-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX11-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX11-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX11-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX11-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX11-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX11-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX11-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
; GFX11-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
; GFX11-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX11-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
; GFX11-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr5
; GFX11-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY17]](s32)
; GFX11-NEXT: [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr6
; GFX11-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY18]](s32)
; GFX11-NEXT: [[COPY19:%[0-9]+]]:_(s32) = COPY $vgpr7
; GFX11-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[COPY19]](s32)
; GFX11-NEXT: [[COPY20:%[0-9]+]]:_(s32) = COPY $vgpr8
; GFX11-NEXT: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[COPY20]](s32)
; GFX11-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
; GFX11-NEXT: [[BITCAST1:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY13]](s32)
; GFX11-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; GFX11-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
; GFX11-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC4]](s16), [[TRUNC5]](s16)
; GFX11-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; GFX11-NEXT: [[BUILD_VECTOR5:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC6]](s16), [[DEF]](s16)
; GFX11-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR4]](<2 x s16>), [[BUILD_VECTOR5]](<2 x s16>)
; GFX11-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<2 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.d.o.2darray), 6, [[BITCAST]](<2 x s16>), [[BITCAST1]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>), [[BUILD_VECTOR3]](<2 x s16>), [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0, 3 :: (dereferenceable load (<2 x s32>), addrspace 8)
; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<2 x s32>)
; GFX11-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX11-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1
main_body:
  %v = call <2 x float> @llvm.amdgcn.image.sample.c.d.o.2darray.v2f32.f32.f16(i32 6, i32 %offset, float %zcompare, half %dsdh, half %dtdh, half %dsdv, half %dtdv, half %s, half %t, half %slice, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
  ret <2 x float> %v
}

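; Declarations for the sample intrinsic variants exercised in this file.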
declare <4 x float> @llvm.amdgcn.image.sample.1d.v4f32.f16(i32, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1
declare <8 x float> @llvm.amdgcn.image.sample.1d.v8f32.f16(i32, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.sample.2d.v4f32.f16(i32, half, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.sample.3d.v4f32.f16(i32, half, half, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.sample.cube.v4f32.f16(i32, half, half, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.sample.1darray.v4f32.f16(i32, half, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.sample.2darray.v4f32.f16(i32, half, half, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1

declare <4 x float> @llvm.amdgcn.image.sample.c.1d.v4f32.f16(i32, float, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.sample.c.2d.v4f32.f16(i32, float, half, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.sample.cl.1d.v4f32.f16(i32, half, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.sample.cl.2d.v4f32.f16(i32, half, half, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.sample.c.cl.1d.v4f32.f16(i32, float, half, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.sample.c.cl.2d.v4f32.f16(i32, float, half, half, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1

declare <4 x float> @llvm.amdgcn.image.sample.b.1d.v4f32.f16.f16(i32, half, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.sample.b.2d.v4f32.f16.f16(i32, half, half, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.sample.c.b.1d.v4f32.f16.f16(i32, half, float, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.sample.c.b.2d.v4f32.f16.f16(i32, half, float, half, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.sample.b.cl.1d.v4f32.f16.f16(i32, half, half, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.sample.b.cl.2d.v4f32.f16.f16(i32, half, half, half, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.sample.c.b.cl.1d.v4f32.f16.f16(i32, half, float, half, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.sample.c.b.cl.2d.v4f32.f16.f16(i32, half, float, half, half, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1

declare <4 x float> @llvm.amdgcn.image.sample.d.1d.v4f32.f16.f16(i32, half, half, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.sample.d.2d.v4f32.f16.f16(i32, half, half, half, half, half, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.sample.d.3d.v4f32.f16.f16(i32, half, half, half, half, half, half, half, half, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.sample.c.d.1d.v4f32.f32.f16(i32, float, half, half, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.sample.c.d.2d.v4f32.f32.f16(i32, float, half, half, half, half, half, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.sample.d.cl.1d.v4f32.f16.f16(i32, half, half, half, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.sample.d.cl.2d.v4f32.f16.f16(i32, half, half, half, half, half, half, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.sample.c.d.cl.1d.v4f32.f32.f16(i32, float, half, half, half, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.sample.c.d.cl.2d.v4f32.f32.f16(i32, float, half, half, half, half, half, half, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1

declare <4 x float> @llvm.amdgcn.image.sample.cd.1d.v4f32.f16.f16(i32, half, half, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.sample.cd.2d.v4f32.f16.f16(i32, half, half, half, half, half, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.sample.c.cd.1d.v4f32.f32.f16(i32, float, half, half, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.sample.c.cd.2d.v4f32.f32.f16(i32, float, half, half, half, half, half, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.sample.cd.cl.1d.v4f32.f16.f16(i32, half, half, half, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.sample.cd.cl.2d.v4f32.f16.f16(i32, half, half, half, half, half, half, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.sample.c.cd.cl.1d.v4f32.f32.f16(i32, float, half, half, half, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.sample.c.cd.cl.2d.v4f32.f32.f16(i32, float, half, half, half, half, half, half, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1

declare <4 x float> @llvm.amdgcn.image.sample.l.1d.v4f32.f16(i32, half, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.sample.l.2d.v4f32.f16(i32, half, half, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.sample.c.l.1d.v4f32.f16(i32, float, half, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.sample.c.l.2d.v4f32.f16(i32, float, half, half, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1

declare <4 x float> @llvm.amdgcn.image.sample.lz.1d.v4f32.f16(i32, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.sample.lz.2d.v4f32.f16(i32, half, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.sample.c.lz.1d.v4f32.f16(i32, float, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.sample.c.lz.2d.v4f32.f16(i32, float, half, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1

declare float @llvm.amdgcn.image.sample.c.d.o.2darray.f32.f16.f16(i32, i32, float, half, half, half, half, half, half, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1
declare <2 x float> @llvm.amdgcn.image.sample.c.d.o.2darray.v2f32.f32.f16(i32, i32, float, half, half, half, half, half, half, half, <8 x i32>, <4 x i32>, i1, i32, i32) #1

attributes #0 = { nounwind }
attributes #1 = { nounwind readonly }
attributes #2 = { nounwind readnone }