; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=tahiti -stop-after=legalizer -o - %s | FileCheck -check-prefix=GFX6 %s
; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1010 -stop-after=legalizer -o - %s | FileCheck -check-prefix=GFX10NSA %s
; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1100 -stop-after=legalizer -o - %s | FileCheck -check-prefix=GFX10NSA %s

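; Basic 2D-array MSAA image load: the GFX6 checks below pack the four coordinates into a <4 x s32> G_BUILD_VECTOR, while the GFX10 NSA form keeps them as separate scalar operands.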
define amdgpu_ps <4 x float> @load_2darraymsaa(<8 x i32> inreg %rsrc, i32 %s, i32 %t, i32 %slice, i32 %fragid) {
; GFX6-LABEL: name: load_2darraymsaa
; GFX6: bb.1 (%ir-block.0):
; GFX6-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3
; GFX6-NEXT: {{ $}}
; GFX6-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX6-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX6-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX6-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX6-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX6-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX6-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX6-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX6-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX6-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX6-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX6-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX6-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX6-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX6-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.load.2darraymsaa), 15, [[BUILD_VECTOR1]](<4 x s32>), $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), 0, 0, 0 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX6-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX6-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX6-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX6-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX6-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX6-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
; GFX10NSA-LABEL: name: load_2darraymsaa
; GFX10NSA: bb.1 (%ir-block.0):
; GFX10NSA-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3
; GFX10NSA-NEXT: {{ $}}
; GFX10NSA-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX10NSA-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX10NSA-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX10NSA-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX10NSA-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX10NSA-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX10NSA-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX10NSA-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX10NSA-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX10NSA-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX10NSA-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX10NSA-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX10NSA-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX10NSA-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.load.2darraymsaa), 15, [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32), [[BUILD_VECTOR]](<8 x s32>), 0, 0, 0 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX10NSA-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX10NSA-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX10NSA-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX10NSA-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX10NSA-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX10NSA-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
%v = call <4 x float> @llvm.amdgcn.image.load.2darraymsaa.v4f32.i32(i32 15, i32 %s, i32 %t, i32 %slice, i32 %fragid, <8 x i32> %rsrc, i32 0, i32 0)
ret <4 x float> %v
}

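; Same load with the TFE bit set (the first immarg after %rsrc is 1), so the intrinsic result carries a fifth status dword that is stored to %out.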
define amdgpu_ps <4 x float> @load_2darraymsaa_tfe(<8 x i32> inreg %rsrc, ptr addrspace(1) inreg %out, i32 %s, i32 %t, i32 %slice, i32 %fragid) {
; GFX6-LABEL: name: load_2darraymsaa_tfe
; GFX6: bb.1 (%ir-block.0):
; GFX6-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $vgpr0, $vgpr1, $vgpr2, $vgpr3
; GFX6-NEXT: {{ $}}
; GFX6-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX6-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX6-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX6-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX6-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX6-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX6-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX6-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX6-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX6-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX6-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX6-NEXT: [[MV:%[0-9]+]]:_(p1) = G_MERGE_VALUES [[COPY8]](s32), [[COPY9]](s32)
; GFX6-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX6-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX6-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX6-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX6-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY10]](s32), [[COPY11]](s32), [[COPY12]](s32), [[COPY13]](s32)
; GFX6-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<5 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.load.2darraymsaa), 15, [[BUILD_VECTOR1]](<4 x s32>), $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), 1, 0, 0 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX6-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<5 x s32>)
; GFX6-NEXT: G_STORE [[UV4]](s32), [[MV]](p1) :: (store (s32) into %ir.out, addrspace 1)
; GFX6-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX6-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX6-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX6-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX6-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
; GFX10NSA-LABEL: name: load_2darraymsaa_tfe
; GFX10NSA: bb.1 (%ir-block.0):
; GFX10NSA-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $vgpr0, $vgpr1, $vgpr2, $vgpr3
; GFX10NSA-NEXT: {{ $}}
; GFX10NSA-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX10NSA-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX10NSA-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX10NSA-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX10NSA-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX10NSA-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX10NSA-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX10NSA-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX10NSA-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX10NSA-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX10NSA-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX10NSA-NEXT: [[MV:%[0-9]+]]:_(p1) = G_MERGE_VALUES [[COPY8]](s32), [[COPY9]](s32)
; GFX10NSA-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX10NSA-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX10NSA-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX10NSA-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX10NSA-NEXT: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<5 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.load.2darraymsaa), 15, [[COPY10]](s32), [[COPY11]](s32), [[COPY12]](s32), [[COPY13]](s32), [[BUILD_VECTOR]](<8 x s32>), 1, 0, 0 :: (dereferenceable load (<4 x s32>), addrspace 8)
; GFX10NSA-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<5 x s32>)
; GFX10NSA-NEXT: G_STORE [[UV4]](s32), [[MV]](p1) :: (store (s32) into %ir.out, addrspace 1)
; GFX10NSA-NEXT: $vgpr0 = COPY [[UV]](s32)
; GFX10NSA-NEXT: $vgpr1 = COPY [[UV1]](s32)
; GFX10NSA-NEXT: $vgpr2 = COPY [[UV2]](s32)
; GFX10NSA-NEXT: $vgpr3 = COPY [[UV3]](s32)
; GFX10NSA-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
%v = call { <4 x float>, i32 } @llvm.amdgcn.image.load.2darraymsaa.sl_v4f32i32s.i32(i32 15, i32 %s, i32 %t, i32 %slice, i32 %fragid, <8 x i32> %rsrc, i32 1, i32 0)
%v.vec = extractvalue { <4 x float>, i32 } %v, 0
%v.err = extractvalue { <4 x float>, i32 } %v, 1
store i32 %v.err, ptr addrspace(1) %out, align 4
ret <4 x float> %v.vec
}

declare <4 x float> @llvm.amdgcn.image.load.2darraymsaa.v4f32.i32(i32 immarg, i32, i32, i32, i32, <8 x i32>, i32 immarg, i32 immarg) #0
declare { <4 x float>, i32 } @llvm.amdgcn.image.load.2darraymsaa.sl_v4f32i32s.i32(i32 immarg, i32, i32, i32, i32, <8 x i32>, i32 immarg, i32 immarg) #0

attributes #0 = { nounwind readonly }