# RUN: not --crash llc -mtriple=amdgcn -mcpu=gfx90a -run-pass=machineverifier -o /dev/null %s 2>&1 | FileCheck %s
# RUN: not --crash llc -mtriple=amdgcn -mcpu=gfx90a --passes='machine-function(verify)' -o /dev/null %s 2>&1 | FileCheck %s
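
# On gfx90a, instructions that access a 64-bit or wider VGPR/AGPR tuple
# generally require the tuple to start at an even-numbered register; the
# *_align2 register classes encode this constraint, and the machine verifier
# reports "Subtarget requires even aligned vector registers" when it is
# violated.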

# Implicit uses are OK.
---
name: implicit_use
body: |
  bb.0:
    $vgpr1_vgpr2 = IMPLICIT_DEF
    S_NOP 0, implicit $vgpr1_vgpr2
    %0:vreg_64 = IMPLICIT_DEF
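
    ; Implicit uses on calls are OK too.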
    %1:sreg_64_xexec = IMPLICIT_DEF
    %2:sreg_64_xexec = SI_CALL %1, 0, csr_amdgpu, implicit $vgpr1_vgpr2
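
    ; $noreg operands are OK.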
    DS_WRITE_B64_gfx9 $noreg, $noreg, 0, 0, implicit $exec
...

# Unaligned registers are allowed to exist, just not as operands of tuple
# instructions.
---
name: copy_like_generic
body: |
  bb.0:
    $vgpr1_vgpr2 = IMPLICIT_DEF
    $vgpr3_vgpr4 = COPY $vgpr1_vgpr2
    %0:vreg_64 = IMPLICIT_DEF
    %1:vreg_64 = COPY %0
...

---
name: mov_32_unaligned_super
body: |
  bb.0:
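    ; A 32-bit access to a single subregister of an unaligned tuple is OK.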
    undef %0.sub1:vreg_64 = V_MOV_B32_e32 0, implicit $exec
    %1:vgpr_32 = V_MOV_B32_e32 undef %2.sub1:vreg_64, implicit $exec
...

# Well-aligned subregister indexes are OK.
---
name: aligned_sub_reg_indexes
body: |
  bb.0:
    %0:vreg_64_align2 = IMPLICIT_DEF
    %1:vreg_128_align2 = IMPLICIT_DEF
    GLOBAL_STORE_DWORDX2 %0, %1.sub0_sub1, 0, 0, implicit $exec
    GLOBAL_STORE_DWORDX2 %0, %1.sub2_sub3, 0, 0, implicit $exec
...

---
name: unaligned_registers
body: |
  bb.0:
    liveins: $vgpr0_vgpr1, $vgpr3_vgpr4_vgpr5_vgpr6
    %0:vreg_64_align2 = IMPLICIT_DEF
    %1:vreg_64 = IMPLICIT_DEF
    %2:vreg_96 = IMPLICIT_DEF
    %3:vreg_128 = IMPLICIT_DEF
    %4:areg_64 = IMPLICIT_DEF
    %5:vreg_128_align2 = IMPLICIT_DEF
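
    ; %0 and %5 use align2 classes; %1-%4 use classes without the alignment
    ; constraint.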

    ; Check virtual register uses
    ; CHECK: *** Bad machine code: Subtarget requires even aligned vector registers ***
    ; CHECK: *** Bad machine code: Subtarget requires even aligned vector registers ***
    ; CHECK: *** Bad machine code: Subtarget requires even aligned vector registers ***
    GLOBAL_STORE_DWORDX2 %0, %1, 0, 0, implicit $exec
    GLOBAL_STORE_DWORDX3 %0, %2, 0, 0, implicit $exec
    GLOBAL_STORE_DWORDX4 %0, %3, 0, 0, implicit $exec

    ; Check virtual registers with subregisters
    ; CHECK: *** Bad machine code: Subtarget requires even aligned vector registers ***
    ; CHECK: *** Bad machine code: Subtarget requires even aligned vector registers ***
    ; CHECK: *** Bad machine code: Subtarget requires even aligned vector registers ***
    ; CHECK: *** Bad machine code: Subtarget requires even aligned vector registers ***
    GLOBAL_STORE_DWORDX2 %0, %3.sub0_sub1, 0, 0, implicit $exec
    GLOBAL_STORE_DWORDX2 %0, %3.sub2_sub3, 0, 0, implicit $exec
    GLOBAL_STORE_DWORDX2 %0, %3.sub1_sub2, 0, 0, implicit $exec
    GLOBAL_STORE_DWORDX2 %0, %5.sub1_sub2, 0, 0, implicit $exec

    ; Check physical register uses
    ; CHECK: *** Bad machine code: Subtarget requires even aligned vector registers ***
    ; CHECK: *** Bad machine code: Subtarget requires even aligned vector registers ***
    ; CHECK: *** Bad machine code: Subtarget requires even aligned vector registers ***
    GLOBAL_STORE_DWORDX2 $vgpr0_vgpr1, $vgpr3_vgpr4, 0, 0, implicit $exec
    GLOBAL_STORE_DWORDX3 $vgpr0_vgpr1, $vgpr3_vgpr4_vgpr5, 0, 0, implicit $exec
    GLOBAL_STORE_DWORDX4 $vgpr0_vgpr1, $vgpr3_vgpr4_vgpr5_vgpr6, 0, 0, implicit $exec

    ; Check virtual register defs
    ; CHECK: *** Bad machine code: Subtarget requires even aligned vector registers ***
    ; CHECK: *** Bad machine code: Subtarget requires even aligned vector registers ***
    ; CHECK: *** Bad machine code: Subtarget requires even aligned vector registers ***
    %6:vreg_64 = GLOBAL_LOAD_DWORDX2 %0, 0, 0, implicit $exec
    %7:vreg_96 = GLOBAL_LOAD_DWORDX3 %0, 0, 0, implicit $exec
    %8:vreg_128 = GLOBAL_LOAD_DWORDX4 %0, 0, 0, implicit $exec
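
    ; Check physical register defs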
    ; CHECK: *** Bad machine code: Subtarget requires even aligned vector registers ***
    ; CHECK: *** Bad machine code: Subtarget requires even aligned vector registers ***
    ; CHECK: *** Bad machine code: Subtarget requires even aligned vector registers ***
    $vgpr1_vgpr2 = GLOBAL_LOAD_DWORDX2 %0, 0, 0, implicit $exec
    $vgpr1_vgpr2_vgpr3 = GLOBAL_LOAD_DWORDX3 %0, 0, 0, implicit $exec
    $vgpr1_vgpr2_vgpr3_vgpr4 = GLOBAL_LOAD_DWORDX4 %0, 0, 0, implicit $exec
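
    ; Check AGPR operands on DS instructions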
    ; CHECK: *** Bad machine code: Subtarget requires even aligned vector registers ***
    ; CHECK: *** Bad machine code: Subtarget requires even aligned vector registers ***
    %9:vgpr_32 = IMPLICIT_DEF
    %10:areg_64 = IMPLICIT_DEF
    %11:areg_128_align2 = IMPLICIT_DEF
    DS_WRITE_B64_gfx9 %9, %10, 0, 0, implicit $exec
    DS_WRITE_B64_gfx9 %9, %11.sub1_sub2, 0, 0, implicit $exec

    ; Check aligned VGPRs for FP32 packed math instructions.
    ; CHECK: *** Bad machine code: Subtarget requires even aligned vector registers ***
    ; CHECK: *** Bad machine code: Subtarget requires even aligned vector registers ***
    ; CHECK: *** Bad machine code: Subtarget requires even aligned vector registers ***
    ; CHECK: *** Bad machine code: Subtarget requires even aligned vector registers ***
    ; CHECK: *** Bad machine code: Subtarget requires even aligned vector registers ***
    ; CHECK: *** Bad machine code: Subtarget requires even aligned vector registers ***
    ; CHECK: *** Bad machine code: Subtarget requires even aligned vector registers ***
    ; CHECK: *** Bad machine code: Subtarget requires even aligned vector registers ***
    ; CHECK: *** Bad machine code: Subtarget requires even aligned vector registers ***
    ; CHECK: *** Bad machine code: Subtarget requires even aligned vector registers ***
    ; CHECK: *** Bad machine code: Subtarget requires even aligned vector registers ***
    ; CHECK: *** Bad machine code: Subtarget requires even aligned vector registers ***
    %12:vreg_64 = IMPLICIT_DEF
    %13:vreg_64_align2 = IMPLICIT_DEF
    %14:areg_96_align2 = IMPLICIT_DEF
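    ; %12 is unaligned, %13 is aligned, and the odd sub1_sub2 subregister of
    ; %14 is unaligned; each offending operand below produces one error.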
    $vgpr3_vgpr4 = V_PK_MOV_B32 8, 0, 8, 0, 0, 0, 0, 0, 0, implicit $exec
    $vgpr0_vgpr1 = V_PK_ADD_F32 0, %12, 11, %13, 0, 0, 0, 0, 0, implicit $mode, implicit $exec
    $vgpr0_vgpr1 = V_PK_ADD_F32 0, %13, 11, %12, 0, 0, 0, 0, 0, implicit $mode, implicit $exec
    $vgpr0_vgpr1 = V_PK_ADD_F32 0, %13, 11, %14.sub1_sub2, 0, 0, 0, 0, 0, implicit $mode, implicit $exec
    $vgpr0_vgpr1 = V_PK_ADD_F32 0, %14.sub1_sub2, 11, %13, 0, 0, 0, 0, 0, implicit $mode, implicit $exec
    $vgpr0_vgpr1 = V_PK_MUL_F32 0, %12, 11, %13, 0, 0, 0, 0, 0, implicit $mode, implicit $exec
    $vgpr0_vgpr1 = V_PK_MUL_F32 0, %13, 11, %12, 0, 0, 0, 0, 0, implicit $mode, implicit $exec
    $vgpr0_vgpr1 = V_PK_MUL_F32 0, %13, 11, %14.sub1_sub2, 0, 0, 0, 0, 0, implicit $mode, implicit $exec
    $vgpr0_vgpr1 = V_PK_MUL_F32 0, %14.sub1_sub2, 11, %13, 0, 0, 0, 0, 0, implicit $mode, implicit $exec
    $vgpr0_vgpr1 = nofpexcept V_PK_FMA_F32 8, %12, 8, %13, 11, %14.sub0_sub1, 0, 0, 0, 0, 0, implicit $mode, implicit $exec
    $vgpr0_vgpr1 = nofpexcept V_PK_FMA_F32 8, %13, 8, %12, 11, %14.sub0_sub1, 0, 0, 0, 0, 0, implicit $mode, implicit $exec
    $vgpr0_vgpr1 = nofpexcept V_PK_FMA_F32 8, %13, 8, %13, 11, %14.sub1_sub2, 0, 0, 0, 0, 0, implicit $mode, implicit $exec
...

# FIXME: Inline asm is not verified.
# ; XCHECK: *** Bad machine code: Subtarget requires even aligned vector registers ***
# ; XCHECK: *** Bad machine code: Subtarget requires even aligned vector registers ***
# ; XCHECK: *** Bad machine code: Subtarget requires even aligned vector registers ***
# INLINEASM &"; use $0 ", 1 /* sideeffect attdialect */, 9 /* reguse */, $vgpr1_vgpr2
# INLINEASM &"; use $0 ", 1 /* sideeffect attdialect */, 9 /* reguse */, %4
# INLINEASM &"; use $0 ", 1 /* sideeffect attdialect */, 9 /* reguse */, %5.sub1_sub2