1 //=- AArch64RegisterInfo.td - Describe the AArch64 Registers -*- tablegen -*-=//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
10 //===----------------------------------------------------------------------===//
// Common base class for every AArch64 register defined in this file.
// `enc` is the 16-bit hardware encoding, `n` the canonical assembly name,
// `subregs` the ordered sub-register list, and `altNames` optional alternate
// printing names (paired with the RegAltNameIndex defs later in the file).
// NOTE(review): the assignment that consumes `enc` (normally HWEncoding) is
// not visible in this chunk -- confirm against the full file.
13 class AArch64Reg<bits<16> enc, string n, list<Register> subregs = [],
14 list<string> altNames = []>
15 : Register<n, altNames> {
17 let Namespace = "AArch64";
18 let SubRegs = subregs;
// Sub-register indices describing how smaller registers nest inside larger
// ones. All live in the AArch64 namespace.
21 let Namespace = "AArch64" in {
// 32-bit W register inside a 64-bit X register.
22 def sub_32 : SubRegIndex<32>;
// Scalar FP/SIMD nesting chain: b (8) inside h (16) inside s (32) inside d (64).
24 def bsub : SubRegIndex<8>;
25 def hsub : SubRegIndex<16>;
26 def ssub : SubRegIndex<32>;
27 def dsub : SubRegIndex<64>;
// Even/odd elements of sequential GPR pairs (used by WSeqPairs/XSeqPairs below).
28 def sube32 : SubRegIndex<32>;
29 def subo32 : SubRegIndex<32>;
30 def sube64 : SubRegIndex<64>;
31 def subo64 : SubRegIndex<64>;
// 128-bit sub-register index; presumably the low 128 bits (Q register) of an
// SVE Z register -- TODO(review): confirm against the Z register defs.
33 def zsub : SubRegIndex<128>;
// Note: Code depends on these having consecutive numbers
// 64-bit elements of D-register tuples (DSeqPairs/Triples/Quads below).
35 def dsub0 : SubRegIndex<64>;
36 def dsub1 : SubRegIndex<64>;
37 def dsub2 : SubRegIndex<64>;
38 def dsub3 : SubRegIndex<64>;
// Note: Code depends on these having consecutive numbers
// 128-bit elements of Q-register tuples (QSeqPairs/Triples/Quads below).
40 def qsub0 : SubRegIndex<128>;
41 def qsub1 : SubRegIndex<128>;
42 def qsub2 : SubRegIndex<128>;
43 def qsub3 : SubRegIndex<128>;
// Note: Code depends on these having consecutive numbers
// SME ZA tile slices; sizes assume a 16x16-byte ZA array as noted per line.
45 def zasubb : SubRegIndex<2048>; // (16 x 16)/1 bytes = 2048 bits
46 def zasubh0 : SubRegIndex<1024>; // (16 x 16)/2 bytes = 1024 bits
47 def zasubh1 : SubRegIndex<1024>; // (16 x 16)/2 bytes = 1024 bits
48 def zasubs0 : SubRegIndex<512>; // (16 x 16)/4 bytes = 512 bits
49 def zasubs1 : SubRegIndex<512>; // (16 x 16)/4 bytes = 512 bits
50 def zasubd0 : SubRegIndex<256>; // (16 x 16)/8 bytes = 256 bits
51 def zasubd1 : SubRegIndex<256>; // (16 x 16)/8 bytes = 256 bits
52 def zasubq0 : SubRegIndex<128>; // (16 x 16)/16 bytes = 128 bits
53 def zasubq1 : SubRegIndex<128>; // (16 x 16)/16 bytes = 128 bits
// 16-bit sub-register index; presumably a predicate-register view --
// TODO(review): confirm which registers use psub.
55 def psub : SubRegIndex<16>;
// Alternate-name indices: `vreg` selects the "vN" vector spelling, `vlist1`
// the (empty-string) spelling used inside vector lists; see the D/Q defs.
58 let Namespace = "AArch64" in {
59 def vreg : RegAltNameIndex;
60 def vlist1 : RegAltNameIndex;
63 //===----------------------------------------------------------------------===//
65 //===----------------------------------------------------------------------===//
// 32-bit integer registers w0-w30. Hardware encoding 31 is context
// dependent: it is the stack pointer (WSP) in addressing contexts and the
// constant zero register (WZR) elsewhere; both are defined with encoding 31.
66 def W0 : AArch64Reg<0, "w0" >, DwarfRegNum<[0]>;
67 def W1 : AArch64Reg<1, "w1" >, DwarfRegNum<[1]>;
68 def W2 : AArch64Reg<2, "w2" >, DwarfRegNum<[2]>;
69 def W3 : AArch64Reg<3, "w3" >, DwarfRegNum<[3]>;
70 def W4 : AArch64Reg<4, "w4" >, DwarfRegNum<[4]>;
71 def W5 : AArch64Reg<5, "w5" >, DwarfRegNum<[5]>;
72 def W6 : AArch64Reg<6, "w6" >, DwarfRegNum<[6]>;
73 def W7 : AArch64Reg<7, "w7" >, DwarfRegNum<[7]>;
74 def W8 : AArch64Reg<8, "w8" >, DwarfRegNum<[8]>;
75 def W9 : AArch64Reg<9, "w9" >, DwarfRegNum<[9]>;
76 def W10 : AArch64Reg<10, "w10">, DwarfRegNum<[10]>;
77 def W11 : AArch64Reg<11, "w11">, DwarfRegNum<[11]>;
78 def W12 : AArch64Reg<12, "w12">, DwarfRegNum<[12]>;
79 def W13 : AArch64Reg<13, "w13">, DwarfRegNum<[13]>;
80 def W14 : AArch64Reg<14, "w14">, DwarfRegNum<[14]>;
81 def W15 : AArch64Reg<15, "w15">, DwarfRegNum<[15]>;
82 def W16 : AArch64Reg<16, "w16">, DwarfRegNum<[16]>;
83 def W17 : AArch64Reg<17, "w17">, DwarfRegNum<[17]>;
84 def W18 : AArch64Reg<18, "w18">, DwarfRegNum<[18]>;
85 def W19 : AArch64Reg<19, "w19">, DwarfRegNum<[19]>;
86 def W20 : AArch64Reg<20, "w20">, DwarfRegNum<[20]>;
87 def W21 : AArch64Reg<21, "w21">, DwarfRegNum<[21]>;
88 def W22 : AArch64Reg<22, "w22">, DwarfRegNum<[22]>;
89 def W23 : AArch64Reg<23, "w23">, DwarfRegNum<[23]>;
90 def W24 : AArch64Reg<24, "w24">, DwarfRegNum<[24]>;
91 def W25 : AArch64Reg<25, "w25">, DwarfRegNum<[25]>;
92 def W26 : AArch64Reg<26, "w26">, DwarfRegNum<[26]>;
93 def W27 : AArch64Reg<27, "w27">, DwarfRegNum<[27]>;
94 def W28 : AArch64Reg<28, "w28">, DwarfRegNum<[28]>;
95 def W29 : AArch64Reg<29, "w29">, DwarfRegNum<[29]>;
96 def W30 : AArch64Reg<30, "w30">, DwarfRegNum<[30]>;
97 def WSP : AArch64Reg<31, "wsp">, DwarfRegNum<[31]>;
// WZR always reads as zero; it shares DWARF number 31 with WSP.
98 let isConstant = true in
99 def WZR : AArch64Reg<31, "wzr">, DwarfRegAlias<WSP>;
// 64-bit integer registers. Each Xn contains the matching Wn as its sub_32
// sub-register; x29/x30 are defined under their ABI names FP and LR.
101 let SubRegIndices = [sub_32] in {
102 def X0 : AArch64Reg<0, "x0", [W0]>, DwarfRegAlias<W0>;
103 def X1 : AArch64Reg<1, "x1", [W1]>, DwarfRegAlias<W1>;
104 def X2 : AArch64Reg<2, "x2", [W2]>, DwarfRegAlias<W2>;
105 def X3 : AArch64Reg<3, "x3", [W3]>, DwarfRegAlias<W3>;
106 def X4 : AArch64Reg<4, "x4", [W4]>, DwarfRegAlias<W4>;
107 def X5 : AArch64Reg<5, "x5", [W5]>, DwarfRegAlias<W5>;
108 def X6 : AArch64Reg<6, "x6", [W6]>, DwarfRegAlias<W6>;
109 def X7 : AArch64Reg<7, "x7", [W7]>, DwarfRegAlias<W7>;
110 def X8 : AArch64Reg<8, "x8", [W8]>, DwarfRegAlias<W8>;
111 def X9 : AArch64Reg<9, "x9", [W9]>, DwarfRegAlias<W9>;
112 def X10 : AArch64Reg<10, "x10", [W10]>, DwarfRegAlias<W10>;
113 def X11 : AArch64Reg<11, "x11", [W11]>, DwarfRegAlias<W11>;
114 def X12 : AArch64Reg<12, "x12", [W12]>, DwarfRegAlias<W12>;
115 def X13 : AArch64Reg<13, "x13", [W13]>, DwarfRegAlias<W13>;
116 def X14 : AArch64Reg<14, "x14", [W14]>, DwarfRegAlias<W14>;
117 def X15 : AArch64Reg<15, "x15", [W15]>, DwarfRegAlias<W15>;
118 def X16 : AArch64Reg<16, "x16", [W16]>, DwarfRegAlias<W16>;
119 def X17 : AArch64Reg<17, "x17", [W17]>, DwarfRegAlias<W17>;
120 def X18 : AArch64Reg<18, "x18", [W18]>, DwarfRegAlias<W18>;
121 def X19 : AArch64Reg<19, "x19", [W19]>, DwarfRegAlias<W19>;
122 def X20 : AArch64Reg<20, "x20", [W20]>, DwarfRegAlias<W20>;
123 def X21 : AArch64Reg<21, "x21", [W21]>, DwarfRegAlias<W21>;
124 def X22 : AArch64Reg<22, "x22", [W22]>, DwarfRegAlias<W22>;
125 def X23 : AArch64Reg<23, "x23", [W23]>, DwarfRegAlias<W23>;
126 def X24 : AArch64Reg<24, "x24", [W24]>, DwarfRegAlias<W24>;
127 def X25 : AArch64Reg<25, "x25", [W25]>, DwarfRegAlias<W25>;
128 def X26 : AArch64Reg<26, "x26", [W26]>, DwarfRegAlias<W26>;
129 def X27 : AArch64Reg<27, "x27", [W27]>, DwarfRegAlias<W27>;
130 def X28 : AArch64Reg<28, "x28", [W28]>, DwarfRegAlias<W28>;
131 def FP : AArch64Reg<29, "x29", [W29]>, DwarfRegAlias<W29>;
132 def LR : AArch64Reg<30, "x30", [W30]>, DwarfRegAlias<W30>;
133 def SP : AArch64Reg<31, "sp", [WSP]>, DwarfRegAlias<WSP>;
// XZR always reads as zero; like SP it maps to DWARF number 31 via WSP.
134 let isConstant = true in
135 def XZR : AArch64Reg<31, "xzr", [WZR]>, DwarfRegAlias<WSP>;
138 // Condition code register.
139 def NZCV : AArch64Reg<0, "nzcv">;
141 // First fault status register
142 def FFR : AArch64Reg<0, "ffr">, DwarfRegNum<[47]>;
144 // Purely virtual Vector Granule (VG) Dwarf register
145 def VG : AArch64Reg<0, "vg">, DwarfRegNum<[46]>;
147 // Floating-point control register
148 def FPCR : AArch64Reg<0, "fpcr">;
150 // Floating-point status register.
151 def FPSR : AArch64Reg<0, "fpsr">;
153 // GPR register classes with the intersections of GPR32/GPR32sp and
154 // GPR64/GPR64sp for use by the coalescer.
// AltOrderSelect returning 1 picks the alternative (rotl-by-8) allocation
// order instead of the default sequential order.
155 def GPR32common : RegisterClass<"AArch64", [i32], 32, (sequence "W%u", 0, 30)> {
156 let AltOrders = [(rotl GPR32common, 8)];
157 let AltOrderSelect = [{ return 1; }];
159 def GPR64common : RegisterClass<"AArch64", [i64], 64,
160 (add (sequence "X%u", 0, 28), FP, LR)> {
161 let AltOrders = [(rotl GPR64common, 8)];
162 let AltOrderSelect = [{ return 1; }];
164 // GPR register classes which exclude SP/WSP.
165 def GPR32 : RegisterClass<"AArch64", [i32], 32, (add GPR32common, WZR)> {
166 let AltOrders = [(rotl GPR32, 8)];
167 let AltOrderSelect = [{ return 1; }];
169 def GPR64 : RegisterClass<"AArch64", [i64], 64, (add GPR64common, XZR)> {
170 let AltOrders = [(rotl GPR64, 8)];
171 let AltOrderSelect = [{ return 1; }];
174 // GPR register classes which include SP/WSP.
175 def GPR32sp : RegisterClass<"AArch64", [i32], 32, (add GPR32common, WSP)> {
176 let AltOrders = [(rotl GPR32sp, 8)];
177 let AltOrderSelect = [{ return 1; }];
179 def GPR64sp : RegisterClass<"AArch64", [i64], 64, (add GPR64common, SP)> {
180 let AltOrders = [(rotl GPR64sp, 8)];
181 let AltOrderSelect = [{ return 1; }];
// Single-register classes holding only the stack pointer views.
184 def GPR32sponly : RegisterClass<"AArch64", [i32], 32, (add WSP)>;
185 def GPR64sponly : RegisterClass<"AArch64", [i64], 64, (add SP)>;
// Assembler operand for GPR64sp where "[xN, #0]"-style parsing is needed.
187 def GPR64spPlus0Operand : AsmOperandClass {
188 let Name = "GPR64sp0";
189 let RenderMethod = "addRegOperands";
190 let PredicateMethod = "isGPR64<AArch64::GPR64spRegClassID>";
191 let ParserMethod = "tryParseGPR64sp0Operand";
194 def GPR64sp0 : RegisterOperand<GPR64sp> {
195 let ParserMatchClass = GPR64spPlus0Operand;
198 // GPR32/GPR64 but with zero-register substitution enabled.
199 // TODO: Roll this out to GPR32/GPR64/GPR32all/GPR64all.
200 def GPR32z : RegisterOperand<GPR32> {
201 let GIZeroRegister = WZR;
203 def GPR64z : RegisterOperand<GPR64> {
204 let GIZeroRegister = XZR;
207 // GPR argument registers.
208 def GPR32arg : RegisterClass<"AArch64", [i32], 32, (sequence "W%u", 0, 7)>;
209 def GPR64arg : RegisterClass<"AArch64", [i64], 64, (sequence "X%u", 0, 7)>;
211 // GPR register classes which include WZR/XZR AND SP/WSP. This is not a
212 // constraint used by any instructions, it is used as a common super-class.
213 def GPR32all : RegisterClass<"AArch64", [i32], 32, (add GPR32common, WZR, WSP)>;
214 def GPR64all : RegisterClass<"AArch64", [i64], 64, (add GPR64common, XZR, SP)>;
216 // For tail calls, we can't use callee-saved registers, as they are restored
217 // to the saved value before the tail call, which would clobber a call address.
218 // This is for indirect tail calls to store the address of the destination.
// NOTE(review): the continuation of this (sub ...) list (remaining excluded
// registers and the closing ")>;") is not visible in this chunk.
219 def tcGPR64 : RegisterClass<"AArch64", [i64], 64, (sub GPR64common, X19, X20, X21,
220 X22, X23, X24, X25, X26,
223 // Restricted sets of tail call registers, for use when branch target
224 // enforcement or PAuthLR are enabled.
225 // For BTI, x16 and x17 are the only registers which can be used to indirectly
226 // branch (not call) to the "BTI c" instruction at the start of a BTI-protected
228 // For PAuthLR, x16 must be used in the function epilogue for other purposes,
229 // so cannot hold the function pointer.
230 def tcGPRx17 : RegisterClass<"AArch64", [i64], 64, (add X17)>;
231 def tcGPRx16x17 : RegisterClass<"AArch64", [i64], 64, (add X16, X17)>;
232 def tcGPRnotx16 : RegisterClass<"AArch64", [i64], 64, (sub tcGPR64, X16)>;
234 // Register set that excludes registers that are reserved for procedure calls.
235 // This is used for pseudo-instructions that are actually implemented using a
237 def GPR64noip : RegisterClass<"AArch64", [i64], 64, (sub GPR64, X16, X17, LR)>;
239 // GPR register classes for post increment amount of vector load/store that
240 // has alternate printing when Rm=31 and prints a constant immediate value
241 // equal to the total number of bytes transferred.
243 // FIXME: TableGen *should* be able to do these itself now. There appears to be
244 // a bug in counting how many operands a Post-indexed MCInst should have which
245 // means the aliases don't trigger.
246 def GPR64pi1 : RegisterOperand<GPR64, "printPostIncOperand<1>">;
247 def GPR64pi2 : RegisterOperand<GPR64, "printPostIncOperand<2>">;
248 def GPR64pi3 : RegisterOperand<GPR64, "printPostIncOperand<3>">;
249 def GPR64pi4 : RegisterOperand<GPR64, "printPostIncOperand<4>">;
250 def GPR64pi6 : RegisterOperand<GPR64, "printPostIncOperand<6>">;
251 def GPR64pi8 : RegisterOperand<GPR64, "printPostIncOperand<8>">;
252 def GPR64pi12 : RegisterOperand<GPR64, "printPostIncOperand<12>">;
253 def GPR64pi16 : RegisterOperand<GPR64, "printPostIncOperand<16>">;
254 def GPR64pi24 : RegisterOperand<GPR64, "printPostIncOperand<24>">;
255 def GPR64pi32 : RegisterOperand<GPR64, "printPostIncOperand<32>">;
256 def GPR64pi48 : RegisterOperand<GPR64, "printPostIncOperand<48>">;
257 def GPR64pi64 : RegisterOperand<GPR64, "printPostIncOperand<64>">;
259 // Condition code regclass.
260 def CCR : RegisterClass<"AArch64", [i32], 32, (add NZCV)> {
261 let CopyCost = -1; // Don't allow copying of status registers.
263 // CCR is not allocatable.
264 let isAllocatable = 0;
267 //===----------------------------------------------------------------------===//
268 // Floating Point Scalar Registers
269 //===----------------------------------------------------------------------===//
// 8-bit scalar FP/SIMD registers b0-b31 (DWARF numbers 64-95). These anchor
// the DWARF numbering for the whole h/s/d/q register hierarchy below.
271 def B0 : AArch64Reg<0, "b0">, DwarfRegNum<[64]>;
272 def B1 : AArch64Reg<1, "b1">, DwarfRegNum<[65]>;
273 def B2 : AArch64Reg<2, "b2">, DwarfRegNum<[66]>;
274 def B3 : AArch64Reg<3, "b3">, DwarfRegNum<[67]>;
275 def B4 : AArch64Reg<4, "b4">, DwarfRegNum<[68]>;
276 def B5 : AArch64Reg<5, "b5">, DwarfRegNum<[69]>;
277 def B6 : AArch64Reg<6, "b6">, DwarfRegNum<[70]>;
278 def B7 : AArch64Reg<7, "b7">, DwarfRegNum<[71]>;
279 def B8 : AArch64Reg<8, "b8">, DwarfRegNum<[72]>;
280 def B9 : AArch64Reg<9, "b9">, DwarfRegNum<[73]>;
281 def B10 : AArch64Reg<10, "b10">, DwarfRegNum<[74]>;
282 def B11 : AArch64Reg<11, "b11">, DwarfRegNum<[75]>;
283 def B12 : AArch64Reg<12, "b12">, DwarfRegNum<[76]>;
284 def B13 : AArch64Reg<13, "b13">, DwarfRegNum<[77]>;
285 def B14 : AArch64Reg<14, "b14">, DwarfRegNum<[78]>;
286 def B15 : AArch64Reg<15, "b15">, DwarfRegNum<[79]>;
287 def B16 : AArch64Reg<16, "b16">, DwarfRegNum<[80]>;
288 def B17 : AArch64Reg<17, "b17">, DwarfRegNum<[81]>;
289 def B18 : AArch64Reg<18, "b18">, DwarfRegNum<[82]>;
290 def B19 : AArch64Reg<19, "b19">, DwarfRegNum<[83]>;
291 def B20 : AArch64Reg<20, "b20">, DwarfRegNum<[84]>;
292 def B21 : AArch64Reg<21, "b21">, DwarfRegNum<[85]>;
293 def B22 : AArch64Reg<22, "b22">, DwarfRegNum<[86]>;
294 def B23 : AArch64Reg<23, "b23">, DwarfRegNum<[87]>;
295 def B24 : AArch64Reg<24, "b24">, DwarfRegNum<[88]>;
296 def B25 : AArch64Reg<25, "b25">, DwarfRegNum<[89]>;
297 def B26 : AArch64Reg<26, "b26">, DwarfRegNum<[90]>;
298 def B27 : AArch64Reg<27, "b27">, DwarfRegNum<[91]>;
299 def B28 : AArch64Reg<28, "b28">, DwarfRegNum<[92]>;
300 def B29 : AArch64Reg<29, "b29">, DwarfRegNum<[93]>;
301 def B30 : AArch64Reg<30, "b30">, DwarfRegNum<[94]>;
302 def B31 : AArch64Reg<31, "b31">, DwarfRegNum<[95]>;
// 16-bit registers h0-h31; each contains the matching B register via bsub
// and reuses that B register's DWARF number (DwarfRegAlias).
304 let SubRegIndices = [bsub] in {
305 def H0 : AArch64Reg<0, "h0", [B0]>, DwarfRegAlias<B0>;
306 def H1 : AArch64Reg<1, "h1", [B1]>, DwarfRegAlias<B1>;
307 def H2 : AArch64Reg<2, "h2", [B2]>, DwarfRegAlias<B2>;
308 def H3 : AArch64Reg<3, "h3", [B3]>, DwarfRegAlias<B3>;
309 def H4 : AArch64Reg<4, "h4", [B4]>, DwarfRegAlias<B4>;
310 def H5 : AArch64Reg<5, "h5", [B5]>, DwarfRegAlias<B5>;
311 def H6 : AArch64Reg<6, "h6", [B6]>, DwarfRegAlias<B6>;
312 def H7 : AArch64Reg<7, "h7", [B7]>, DwarfRegAlias<B7>;
313 def H8 : AArch64Reg<8, "h8", [B8]>, DwarfRegAlias<B8>;
314 def H9 : AArch64Reg<9, "h9", [B9]>, DwarfRegAlias<B9>;
315 def H10 : AArch64Reg<10, "h10", [B10]>, DwarfRegAlias<B10>;
316 def H11 : AArch64Reg<11, "h11", [B11]>, DwarfRegAlias<B11>;
317 def H12 : AArch64Reg<12, "h12", [B12]>, DwarfRegAlias<B12>;
318 def H13 : AArch64Reg<13, "h13", [B13]>, DwarfRegAlias<B13>;
319 def H14 : AArch64Reg<14, "h14", [B14]>, DwarfRegAlias<B14>;
320 def H15 : AArch64Reg<15, "h15", [B15]>, DwarfRegAlias<B15>;
321 def H16 : AArch64Reg<16, "h16", [B16]>, DwarfRegAlias<B16>;
322 def H17 : AArch64Reg<17, "h17", [B17]>, DwarfRegAlias<B17>;
323 def H18 : AArch64Reg<18, "h18", [B18]>, DwarfRegAlias<B18>;
324 def H19 : AArch64Reg<19, "h19", [B19]>, DwarfRegAlias<B19>;
325 def H20 : AArch64Reg<20, "h20", [B20]>, DwarfRegAlias<B20>;
326 def H21 : AArch64Reg<21, "h21", [B21]>, DwarfRegAlias<B21>;
327 def H22 : AArch64Reg<22, "h22", [B22]>, DwarfRegAlias<B22>;
328 def H23 : AArch64Reg<23, "h23", [B23]>, DwarfRegAlias<B23>;
329 def H24 : AArch64Reg<24, "h24", [B24]>, DwarfRegAlias<B24>;
330 def H25 : AArch64Reg<25, "h25", [B25]>, DwarfRegAlias<B25>;
331 def H26 : AArch64Reg<26, "h26", [B26]>, DwarfRegAlias<B26>;
332 def H27 : AArch64Reg<27, "h27", [B27]>, DwarfRegAlias<B27>;
333 def H28 : AArch64Reg<28, "h28", [B28]>, DwarfRegAlias<B28>;
334 def H29 : AArch64Reg<29, "h29", [B29]>, DwarfRegAlias<B29>;
335 def H30 : AArch64Reg<30, "h30", [B30]>, DwarfRegAlias<B30>;
336 def H31 : AArch64Reg<31, "h31", [B31]>, DwarfRegAlias<B31>;
// 32-bit registers s0-s31; each contains the matching H register via hsub.
// DWARF numbers alias the corresponding B register.
339 let SubRegIndices = [hsub] in {
340 def S0 : AArch64Reg<0, "s0", [H0]>, DwarfRegAlias<B0>;
341 def S1 : AArch64Reg<1, "s1", [H1]>, DwarfRegAlias<B1>;
342 def S2 : AArch64Reg<2, "s2", [H2]>, DwarfRegAlias<B2>;
343 def S3 : AArch64Reg<3, "s3", [H3]>, DwarfRegAlias<B3>;
344 def S4 : AArch64Reg<4, "s4", [H4]>, DwarfRegAlias<B4>;
345 def S5 : AArch64Reg<5, "s5", [H5]>, DwarfRegAlias<B5>;
346 def S6 : AArch64Reg<6, "s6", [H6]>, DwarfRegAlias<B6>;
347 def S7 : AArch64Reg<7, "s7", [H7]>, DwarfRegAlias<B7>;
348 def S8 : AArch64Reg<8, "s8", [H8]>, DwarfRegAlias<B8>;
349 def S9 : AArch64Reg<9, "s9", [H9]>, DwarfRegAlias<B9>;
350 def S10 : AArch64Reg<10, "s10", [H10]>, DwarfRegAlias<B10>;
351 def S11 : AArch64Reg<11, "s11", [H11]>, DwarfRegAlias<B11>;
352 def S12 : AArch64Reg<12, "s12", [H12]>, DwarfRegAlias<B12>;
353 def S13 : AArch64Reg<13, "s13", [H13]>, DwarfRegAlias<B13>;
354 def S14 : AArch64Reg<14, "s14", [H14]>, DwarfRegAlias<B14>;
355 def S15 : AArch64Reg<15, "s15", [H15]>, DwarfRegAlias<B15>;
356 def S16 : AArch64Reg<16, "s16", [H16]>, DwarfRegAlias<B16>;
357 def S17 : AArch64Reg<17, "s17", [H17]>, DwarfRegAlias<B17>;
358 def S18 : AArch64Reg<18, "s18", [H18]>, DwarfRegAlias<B18>;
359 def S19 : AArch64Reg<19, "s19", [H19]>, DwarfRegAlias<B19>;
360 def S20 : AArch64Reg<20, "s20", [H20]>, DwarfRegAlias<B20>;
361 def S21 : AArch64Reg<21, "s21", [H21]>, DwarfRegAlias<B21>;
362 def S22 : AArch64Reg<22, "s22", [H22]>, DwarfRegAlias<B22>;
363 def S23 : AArch64Reg<23, "s23", [H23]>, DwarfRegAlias<B23>;
364 def S24 : AArch64Reg<24, "s24", [H24]>, DwarfRegAlias<B24>;
365 def S25 : AArch64Reg<25, "s25", [H25]>, DwarfRegAlias<B25>;
366 def S26 : AArch64Reg<26, "s26", [H26]>, DwarfRegAlias<B26>;
367 def S27 : AArch64Reg<27, "s27", [H27]>, DwarfRegAlias<B27>;
368 def S28 : AArch64Reg<28, "s28", [H28]>, DwarfRegAlias<B28>;
369 def S29 : AArch64Reg<29, "s29", [H29]>, DwarfRegAlias<B29>;
370 def S30 : AArch64Reg<30, "s30", [H30]>, DwarfRegAlias<B30>;
371 def S31 : AArch64Reg<31, "s31", [H31]>, DwarfRegAlias<B31>;
// 64-bit registers d0-d31; each contains the matching S register via ssub.
// Alt names: "vN" under the vreg index, empty string under vlist1 (used when
// the register is printed inside a vector list).
374 let SubRegIndices = [ssub], RegAltNameIndices = [vreg, vlist1] in {
375 def D0 : AArch64Reg<0, "d0", [S0], ["v0", ""]>, DwarfRegAlias<B0>;
376 def D1 : AArch64Reg<1, "d1", [S1], ["v1", ""]>, DwarfRegAlias<B1>;
377 def D2 : AArch64Reg<2, "d2", [S2], ["v2", ""]>, DwarfRegAlias<B2>;
378 def D3 : AArch64Reg<3, "d3", [S3], ["v3", ""]>, DwarfRegAlias<B3>;
379 def D4 : AArch64Reg<4, "d4", [S4], ["v4", ""]>, DwarfRegAlias<B4>;
380 def D5 : AArch64Reg<5, "d5", [S5], ["v5", ""]>, DwarfRegAlias<B5>;
381 def D6 : AArch64Reg<6, "d6", [S6], ["v6", ""]>, DwarfRegAlias<B6>;
382 def D7 : AArch64Reg<7, "d7", [S7], ["v7", ""]>, DwarfRegAlias<B7>;
383 def D8 : AArch64Reg<8, "d8", [S8], ["v8", ""]>, DwarfRegAlias<B8>;
384 def D9 : AArch64Reg<9, "d9", [S9], ["v9", ""]>, DwarfRegAlias<B9>;
385 def D10 : AArch64Reg<10, "d10", [S10], ["v10", ""]>, DwarfRegAlias<B10>;
386 def D11 : AArch64Reg<11, "d11", [S11], ["v11", ""]>, DwarfRegAlias<B11>;
387 def D12 : AArch64Reg<12, "d12", [S12], ["v12", ""]>, DwarfRegAlias<B12>;
388 def D13 : AArch64Reg<13, "d13", [S13], ["v13", ""]>, DwarfRegAlias<B13>;
389 def D14 : AArch64Reg<14, "d14", [S14], ["v14", ""]>, DwarfRegAlias<B14>;
390 def D15 : AArch64Reg<15, "d15", [S15], ["v15", ""]>, DwarfRegAlias<B15>;
391 def D16 : AArch64Reg<16, "d16", [S16], ["v16", ""]>, DwarfRegAlias<B16>;
392 def D17 : AArch64Reg<17, "d17", [S17], ["v17", ""]>, DwarfRegAlias<B17>;
393 def D18 : AArch64Reg<18, "d18", [S18], ["v18", ""]>, DwarfRegAlias<B18>;
394 def D19 : AArch64Reg<19, "d19", [S19], ["v19", ""]>, DwarfRegAlias<B19>;
395 def D20 : AArch64Reg<20, "d20", [S20], ["v20", ""]>, DwarfRegAlias<B20>;
396 def D21 : AArch64Reg<21, "d21", [S21], ["v21", ""]>, DwarfRegAlias<B21>;
397 def D22 : AArch64Reg<22, "d22", [S22], ["v22", ""]>, DwarfRegAlias<B22>;
398 def D23 : AArch64Reg<23, "d23", [S23], ["v23", ""]>, DwarfRegAlias<B23>;
399 def D24 : AArch64Reg<24, "d24", [S24], ["v24", ""]>, DwarfRegAlias<B24>;
400 def D25 : AArch64Reg<25, "d25", [S25], ["v25", ""]>, DwarfRegAlias<B25>;
401 def D26 : AArch64Reg<26, "d26", [S26], ["v26", ""]>, DwarfRegAlias<B26>;
402 def D27 : AArch64Reg<27, "d27", [S27], ["v27", ""]>, DwarfRegAlias<B27>;
403 def D28 : AArch64Reg<28, "d28", [S28], ["v28", ""]>, DwarfRegAlias<B28>;
404 def D29 : AArch64Reg<29, "d29", [S29], ["v29", ""]>, DwarfRegAlias<B29>;
405 def D30 : AArch64Reg<30, "d30", [S30], ["v30", ""]>, DwarfRegAlias<B30>;
406 def D31 : AArch64Reg<31, "d31", [S31], ["v31", ""]>, DwarfRegAlias<B31>;
// 128-bit registers q0-q31; each contains the matching D register via dsub,
// completing the b/h/s/d/q nesting chain. Same vreg/vlist1 alt names as D.
409 let SubRegIndices = [dsub], RegAltNameIndices = [vreg, vlist1] in {
410 def Q0 : AArch64Reg<0, "q0", [D0], ["v0", ""]>, DwarfRegAlias<B0>;
411 def Q1 : AArch64Reg<1, "q1", [D1], ["v1", ""]>, DwarfRegAlias<B1>;
412 def Q2 : AArch64Reg<2, "q2", [D2], ["v2", ""]>, DwarfRegAlias<B2>;
413 def Q3 : AArch64Reg<3, "q3", [D3], ["v3", ""]>, DwarfRegAlias<B3>;
414 def Q4 : AArch64Reg<4, "q4", [D4], ["v4", ""]>, DwarfRegAlias<B4>;
415 def Q5 : AArch64Reg<5, "q5", [D5], ["v5", ""]>, DwarfRegAlias<B5>;
416 def Q6 : AArch64Reg<6, "q6", [D6], ["v6", ""]>, DwarfRegAlias<B6>;
417 def Q7 : AArch64Reg<7, "q7", [D7], ["v7", ""]>, DwarfRegAlias<B7>;
418 def Q8 : AArch64Reg<8, "q8", [D8], ["v8", ""]>, DwarfRegAlias<B8>;
419 def Q9 : AArch64Reg<9, "q9", [D9], ["v9", ""]>, DwarfRegAlias<B9>;
420 def Q10 : AArch64Reg<10, "q10", [D10], ["v10", ""]>, DwarfRegAlias<B10>;
421 def Q11 : AArch64Reg<11, "q11", [D11], ["v11", ""]>, DwarfRegAlias<B11>;
422 def Q12 : AArch64Reg<12, "q12", [D12], ["v12", ""]>, DwarfRegAlias<B12>;
423 def Q13 : AArch64Reg<13, "q13", [D13], ["v13", ""]>, DwarfRegAlias<B13>;
424 def Q14 : AArch64Reg<14, "q14", [D14], ["v14", ""]>, DwarfRegAlias<B14>;
425 def Q15 : AArch64Reg<15, "q15", [D15], ["v15", ""]>, DwarfRegAlias<B15>;
426 def Q16 : AArch64Reg<16, "q16", [D16], ["v16", ""]>, DwarfRegAlias<B16>;
427 def Q17 : AArch64Reg<17, "q17", [D17], ["v17", ""]>, DwarfRegAlias<B17>;
428 def Q18 : AArch64Reg<18, "q18", [D18], ["v18", ""]>, DwarfRegAlias<B18>;
429 def Q19 : AArch64Reg<19, "q19", [D19], ["v19", ""]>, DwarfRegAlias<B19>;
430 def Q20 : AArch64Reg<20, "q20", [D20], ["v20", ""]>, DwarfRegAlias<B20>;
431 def Q21 : AArch64Reg<21, "q21", [D21], ["v21", ""]>, DwarfRegAlias<B21>;
432 def Q22 : AArch64Reg<22, "q22", [D22], ["v22", ""]>, DwarfRegAlias<B22>;
433 def Q23 : AArch64Reg<23, "q23", [D23], ["v23", ""]>, DwarfRegAlias<B23>;
434 def Q24 : AArch64Reg<24, "q24", [D24], ["v24", ""]>, DwarfRegAlias<B24>;
435 def Q25 : AArch64Reg<25, "q25", [D25], ["v25", ""]>, DwarfRegAlias<B25>;
436 def Q26 : AArch64Reg<26, "q26", [D26], ["v26", ""]>, DwarfRegAlias<B26>;
437 def Q27 : AArch64Reg<27, "q27", [D27], ["v27", ""]>, DwarfRegAlias<B27>;
438 def Q28 : AArch64Reg<28, "q28", [D28], ["v28", ""]>, DwarfRegAlias<B28>;
439 def Q29 : AArch64Reg<29, "q29", [D29], ["v29", ""]>, DwarfRegAlias<B29>;
440 def Q30 : AArch64Reg<30, "q30", [D30], ["v30", ""]>, DwarfRegAlias<B30>;
441 def Q31 : AArch64Reg<31, "q31", [D31], ["v31", ""]>, DwarfRegAlias<B31>;
// Scalar and vector FP register classes over the b/h/s/d/q register files.
// NOTE(review): several type-list continuation lines and closing braces of
// these defs are not visible in this chunk (gaps in the original numbering).
444 def FPR8 : RegisterClass<"AArch64", [i8], 8, (sequence "B%u", 0, 31)> {
447 def FPR16 : RegisterClass<"AArch64", [f16, bf16, i16], 16, (sequence "H%u", 0, 31)> {
// The lower 16 H registers (h0-h15), for instructions restricted to v0-v15.
451 def FPR16_lo : RegisterClass<"AArch64", [f16], 16, (trunc FPR16, 16)> {
454 def FPR32 : RegisterClass<"AArch64", [f32, i32], 32,(sequence "S%u", 0, 31)>;
455 def FPR64 : RegisterClass<"AArch64", [f64, i64, v2f32, v1f64, v8i8, v4i16, v2i32,
456 v1i64, v4f16, v4bf16],
457 64, (sequence "D%u", 0, 31)>;
// The lower 16 D registers (d0-d15).
458 def FPR64_lo : RegisterClass<"AArch64",
459 [v8i8, v4i16, v2i32, v1i64, v4f16, v4bf16, v2f32,
461 64, (trunc FPR64, 16)>;
463 // We don't (yet) have an f128 legal type, so don't use that here. We
464 // normalize 128-bit vectors to v2f64 for arg passing and such, so use
466 def FPR128 : RegisterClass<"AArch64",
467 [v16i8, v8i16, v4i32, v2i64, v4f32, v2f64, f128,
469 128, (sequence "Q%u", 0, 31)>;
471 // The lower 16 vector registers. Some instructions can only take registers
473 def FPR128_lo : RegisterClass<"AArch64",
474 [v16i8, v8i16, v4i32, v2i64, v4f32, v2f64, v8f16,
476 128, (trunc FPR128, 16)>;
478 // The lower 8 vector registers. Some instructions can only take registers
480 def FPR128_0to7 : RegisterClass<"AArch64",
481 [v16i8, v8i16, v4i32, v2i64, v4f32, v2f64, v8f16,
483 128, (trunc FPR128, 8)>;
485 // Pairs, triples, and quads of 64-bit vector registers.
// Consecutive-register tuples indexed by dsub0..dsub3; the DD/DDD/DDDD
// classes below wrap them for use as vector-list operands (VecListTwo etc.).
486 def DSeqPairs : RegisterTuples<[dsub0, dsub1], [(rotl FPR64, 0), (rotl FPR64, 1)]>;
// NOTE(review): the third (rotl FPR64, 2) line of DSeqTriples is not visible
// in this chunk.
487 def DSeqTriples : RegisterTuples<[dsub0, dsub1, dsub2],
488 [(rotl FPR64, 0), (rotl FPR64, 1),
490 def DSeqQuads : RegisterTuples<[dsub0, dsub1, dsub2, dsub3],
491 [(rotl FPR64, 0), (rotl FPR64, 1),
492 (rotl FPR64, 2), (rotl FPR64, 3)]>;
493 def DD : RegisterClass<"AArch64", [untyped], 64, (add DSeqPairs)> {
496 def DDD : RegisterClass<"AArch64", [untyped], 64, (add DSeqTriples)> {
499 def DDDD : RegisterClass<"AArch64", [untyped], 64, (add DSeqQuads)> {
503 // Pairs, triples, and quads of 128-bit vector registers.
504 def QSeqPairs : RegisterTuples<[qsub0, qsub1], [(rotl FPR128, 0), (rotl FPR128, 1)]>;
505 def QSeqTriples : RegisterTuples<[qsub0, qsub1, qsub2],
506 [(rotl FPR128, 0), (rotl FPR128, 1),
508 def QSeqQuads : RegisterTuples<[qsub0, qsub1, qsub2, qsub3],
509 [(rotl FPR128, 0), (rotl FPR128, 1),
510 (rotl FPR128, 2), (rotl FPR128, 3)]>;
511 def QQ : RegisterClass<"AArch64", [untyped], 128, (add QSeqPairs)> {
514 def QQQ : RegisterClass<"AArch64", [untyped], 128, (add QSeqTriples)> {
517 def QQQQ : RegisterClass<"AArch64", [untyped], 128, (add QSeqQuads)> {
522 // Vector operand versions of the FP registers. Alternate name printing and
523 // assembler matching.
// Assembler operand classes and RegisterOperands for "vN"-style printing of
// the D/Q register files (printVRegOperand), plus the restricted v0-v15 and
// v0-v7 variants used by instructions with narrower register fields.
524 def VectorReg64AsmOperand : AsmOperandClass {
525 let Name = "VectorReg64";
526 let PredicateMethod = "isNeonVectorReg";
528 def VectorReg128AsmOperand : AsmOperandClass {
529 let Name = "VectorReg128";
530 let PredicateMethod = "isNeonVectorReg";
533 def V64 : RegisterOperand<FPR64, "printVRegOperand"> {
534 let ParserMatchClass = VectorReg64AsmOperand;
537 def V128 : RegisterOperand<FPR128, "printVRegOperand"> {
538 let ParserMatchClass = VectorReg128AsmOperand;
// Operands restricted to the lower 16 vector registers (FPR64_lo/FPR128_lo).
541 def VectorRegLoAsmOperand : AsmOperandClass {
542 let Name = "VectorRegLo";
543 let PredicateMethod = "isNeonVectorRegLo";
545 def V64_lo : RegisterOperand<FPR64_lo, "printVRegOperand"> {
546 let ParserMatchClass = VectorRegLoAsmOperand;
548 def V128_lo : RegisterOperand<FPR128_lo, "printVRegOperand"> {
549 let ParserMatchClass = VectorRegLoAsmOperand;
// Operand restricted to the lower 8 vector registers (FPR128_0to7).
552 def VectorReg0to7AsmOperand : AsmOperandClass {
553 let Name = "VectorReg0to7";
554 let PredicateMethod = "isNeonVectorReg0to7";
557 def V128_0to7 : RegisterOperand<FPR128_0to7, "printVRegOperand"> {
558 let ParserMatchClass = VectorReg0to7AsmOperand;
// Helper classes for explicitly typed NEON vector-list operands, e.g.
// "{ v0.8b, v1.8b }": `count` registers, `lanes` elements of `eltsize` bits.
// NOTE(review): lines of this class are missing in this chunk -- the line
// starting with "=" below is the tail of a `let PredicateMethod` whose head
// is not visible.
561 class TypedVecListAsmOperand<int count, string vecty, int lanes, int eltsize>
563 let Name = "TypedVectorList" # count # "_" # lanes # eltsize;
566 = "isTypedVectorList<RegKind::NeonVector, " # count # ", " # lanes # ", " # eltsize # ">";
567 let RenderMethod = "addVectorListOperands<" # vecty # ", " # count # ">";
// Matching RegisterOperand; NOTE(review): the eltsize concatenation and
// closing of this class are not visible in this chunk.
570 class TypedVecListRegOperand<RegisterClass Reg, int lanes, string eltsize>
571 : RegisterOperand<Reg, "printTypedVectorList<" # lanes # ", '"
// For a list length `count`, emits the AsmOperandClass and RegisterOperand
// defs for NEON vector lists: implicitly typed D/Q lists, explicitly typed
// 64-bit (8b/4h/2s/1d) and 128-bit (16b/8h/4s/2d) lists, and the
// lane-suffix-only b/h/s/d forms (lanes == 0) used for indexed-element lists.
// NOTE(review): several closing braces and comment lines from the original
// file are not visible in this chunk.
574 multiclass VectorList<int count, RegisterClass Reg64, RegisterClass Reg128> {
575 // With implicit types (probably on instruction instead). E.g. { v0, v1 }
576 def _64AsmOperand : AsmOperandClass {
577 let Name = NAME # "64";
578 let PredicateMethod = "isImplicitlyTypedVectorList<RegKind::NeonVector, " # count # ">";
579 let RenderMethod = "addVectorListOperands<AArch64Operand::VecListIdx_DReg, " # count # ">";
582 def "64" : RegisterOperand<Reg64, "printImplicitlyTypedVectorList"> {
583 let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_64AsmOperand");
586 def _128AsmOperand : AsmOperandClass {
587 let Name = NAME # "128";
588 let PredicateMethod = "isImplicitlyTypedVectorList<RegKind::NeonVector, " # count # ">";
589 let RenderMethod = "addVectorListOperands<AArch64Operand::VecListIdx_QReg, " # count # ">";
592 def "128" : RegisterOperand<Reg128, "printImplicitlyTypedVectorList"> {
593 let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_128AsmOperand");
596 // 64-bit register lists with explicit type.
599 def _8bAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_DReg", 8, 8>;
600 def "8b" : TypedVecListRegOperand<Reg64, 8, "b"> {
601 let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_8bAsmOperand");
605 def _4hAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_DReg", 4, 16>;
606 def "4h" : TypedVecListRegOperand<Reg64, 4, "h"> {
607 let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_4hAsmOperand");
611 def _2sAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_DReg", 2, 32>;
612 def "2s" : TypedVecListRegOperand<Reg64, 2, "s"> {
613 let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_2sAsmOperand");
617 def _1dAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_DReg", 1, 64>;
618 def "1d" : TypedVecListRegOperand<Reg64, 1, "d"> {
619 let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_1dAsmOperand");
622 // 128-bit register lists with explicit type
624 // { v0.16b, v1.16b }
625 def _16bAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_QReg", 16, 8>;
626 def "16b" : TypedVecListRegOperand<Reg128, 16, "b"> {
627 let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_16bAsmOperand");
631 def _8hAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_QReg", 8, 16>;
632 def "8h" : TypedVecListRegOperand<Reg128, 8, "h"> {
633 let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_8hAsmOperand");
637 def _4sAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_QReg", 4, 32>;
638 def "4s" : TypedVecListRegOperand<Reg128, 4, "s"> {
639 let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_4sAsmOperand");
643 def _2dAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_QReg", 2, 64>;
644 def "2d" : TypedVecListRegOperand<Reg128, 2, "d"> {
645 let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_2dAsmOperand");
// Lane-suffix-only forms (lanes == 0), e.g. "{ v0.b, v1.b }".
649 def _bAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_QReg", 0, 8>;
650 def "b" : TypedVecListRegOperand<Reg128, 0, "b"> {
651 let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_bAsmOperand");
655 def _hAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_QReg", 0, 16>;
656 def "h" : TypedVecListRegOperand<Reg128, 0, "h"> {
657 let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_hAsmOperand");
661 def _sAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_QReg", 0, 32>;
662 def "s" : TypedVecListRegOperand<Reg128, 0, "s"> {
663 let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_sAsmOperand");
667 def _dAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_QReg", 0, 64>;
668 def "d" : TypedVecListRegOperand<Reg128, 0, "d"> {
669 let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_dAsmOperand");
// Instantiate vector-list operands for list lengths 1-4. Length 1 uses the
// plain FPR64/FPR128 classes; lengths 2-4 use the consecutive tuples above.
675 defm VecListOne : VectorList<1, FPR64, FPR128>;
676 defm VecListTwo : VectorList<2, DD, QQ>;
677 defm VecListThree : VectorList<3, DDD, QQQ>;
678 defm VecListFour : VectorList<4, DDDD, QQQQ>;
// Assembler operand class for scalar FP registers of register class `RC`.
// NOTE(review): the PredicateMethod string is built from "isGPR64<...>"
// although RC here names FPR classes -- confirm the predicate name against
// the AArch64 asm parser; this may be a transcription error in this chunk.
680 class FPRAsmOperand<string RC> : AsmOperandClass {
681 let Name = "FPRAsmOperand" # RC;
682 let PredicateMethod = "isGPR64<AArch64::" # RC # "RegClassID>";
683 let RenderMethod = "addRegOperands";
686 // Register operand versions of the scalar FP registers.
687 def FPR8Op : RegisterOperand<FPR8, "printOperand"> {
688 let ParserMatchClass = FPRAsmOperand<"FPR8">;
691 def FPR16Op : RegisterOperand<FPR16, "printOperand"> {
692 let ParserMatchClass = FPRAsmOperand<"FPR16">;
695 def FPR16Op_lo : RegisterOperand<FPR16_lo, "printOperand"> {
696 let ParserMatchClass = FPRAsmOperand<"FPR16_lo">;
699 def FPR32Op : RegisterOperand<FPR32, "printOperand"> {
700 let ParserMatchClass = FPRAsmOperand<"FPR32">;
703 def FPR64Op : RegisterOperand<FPR64, "printOperand"> {
704 let ParserMatchClass = FPRAsmOperand<"FPR64">;
707 def FPR128Op : RegisterOperand<FPR128, "printOperand"> {
708 let ParserMatchClass = FPRAsmOperand<"FPR128">;
711 //===----------------------------------------------------------------------===//
712 // ARMv8.1a atomic CASP register operands
// Sequential even/odd GPR pairs: decimate-by-2 over a rotation selects
// (W0,W1), (W2,W3), ... so each tuple starts at an even register.
715 def WSeqPairs : RegisterTuples<[sube32, subo32],
716 [(decimate (rotl GPR32, 0), 2),
717 (decimate (rotl GPR32, 1), 2)]>;
718 def XSeqPairs : RegisterTuples<[sube64, subo64],
719 [(decimate (rotl GPR64, 0), 2),
720 (decimate (rotl GPR64, 1), 2)]>;
722 def WSeqPairsClass : RegisterClass<"AArch64", [untyped], 32,
726 def XSeqPairsClass : RegisterClass<"AArch64", [untyped], 64,
// Shared parse/render configuration for both pair operand classes.
732 let RenderMethod = "addRegOperands", ParserMethod="tryParseGPRSeqPair" in {
733 def WSeqPairsAsmOperandClass : AsmOperandClass { let Name = "WSeqPair"; }
734 def XSeqPairsAsmOperandClass : AsmOperandClass { let Name = "XSeqPair"; }
737 def WSeqPairClassOperand :
738 RegisterOperand<WSeqPairsClass, "printGPRSeqPairsClassOperand<32>"> {
739 let ParserMatchClass = WSeqPairsAsmOperandClass;
741 def XSeqPairClassOperand :
742 RegisterOperand<XSeqPairsClass, "printGPRSeqPairsClassOperand<64>"> {
743 let ParserMatchClass = XSeqPairsAsmOperandClass;
745 // Reuse the parsing and register numbers from XSeqPairs, but encoding is different.
746 def MrrsMssrPairClassOperand :
747 RegisterOperand<XSeqPairsClass, "printGPRSeqPairsClassOperand<64>"> {
748 let ParserMatchClass = XSeqPairsAsmOperandClass;
// SYSP operand pairing a GPR64 with XZR (custom parse/render methods).
750 def SyspXzrPairOperandMatcherClass : AsmOperandClass {
751 let Name = "SyspXzrPair";
752 let RenderMethod = "addSyspXzrPairOperand";
753 let ParserMethod = "tryParseSyspXzrPair";
755 def SyspXzrPairOperand :
756 RegisterOperand<GPR64, "printSyspXzrPair"> { // needed to allow alias with XZR operand
757 let ParserMatchClass = SyspXzrPairOperandMatcherClass;
764 //===----------------------------------------------------------------------===//
765 // Armv8.7a accelerator extension register operands: 8 consecutive GPRs
766 // starting with an even one
768 let Namespace = "AArch64" in {
// x8sub_0..x8sub_7: 64-bit sub-register indices at offsets 0, 64, ..., 448.
770 def "x8sub_"#i : SubRegIndex<64, !mul(64, i)>;
// Tuples8X: 8-register tuples; decimate-by-2 keeps even-started groups,
// trunc limits the set to 12 tuples.
773 def Tuples8X : RegisterTuples<
774 !foreach(i, [0,1,2,3,4,5,6,7], !cast<SubRegIndex>("x8sub_"#i)),
775 !foreach(i, [0,1,2,3,4,5,6,7], (trunc (decimate (rotl GPR64, i), 2), 12))>;
777 def GPR64x8Class : RegisterClass<"AArch64", [i64x8], 512, (trunc Tuples8X, 12)> {
780 def GPR64x8AsmOp : AsmOperandClass {
781 let Name = "GPR64x8";
782 let ParserMethod = "tryParseGPR64x8";
783 let RenderMethod = "addRegOperands";
785 def GPR64x8 : RegisterOperand<GPR64x8Class, "printGPR64x8"> {
786 let ParserMatchClass = GPR64x8AsmOp;
787 let PrintMethod = "printGPR64x8";
792 // SVE predicate-as-counter registers
// PN0-PN15 share DWARF numbers 48-63 with the P registers (see the
// DwarfRegAlias on P0-P15 below in the original file).
793 def PN0 : AArch64Reg<0, "pn0">, DwarfRegNum<[48]>;
794 def PN1 : AArch64Reg<1, "pn1">, DwarfRegNum<[49]>;
795 def PN2 : AArch64Reg<2, "pn2">, DwarfRegNum<[50]>;
796 def PN3 : AArch64Reg<3, "pn3">, DwarfRegNum<[51]>;
797 def PN4 : AArch64Reg<4, "pn4">, DwarfRegNum<[52]>;
798 def PN5 : AArch64Reg<5, "pn5">, DwarfRegNum<[53]>;
799 def PN6 : AArch64Reg<6, "pn6">, DwarfRegNum<[54]>;
800 def PN7 : AArch64Reg<7, "pn7">, DwarfRegNum<[55]>;
801 def PN8 : AArch64Reg<8, "pn8">, DwarfRegNum<[56]>;
802 def PN9 : AArch64Reg<9, "pn9">, DwarfRegNum<[57]>;
803 def PN10 : AArch64Reg<10, "pn10">, DwarfRegNum<[58]>;
804 def PN11 : AArch64Reg<11, "pn11">, DwarfRegNum<[59]>;
805 def PN12 : AArch64Reg<12, "pn12">, DwarfRegNum<[60]>;
806 def PN13 : AArch64Reg<13, "pn13">, DwarfRegNum<[61]>;
807 def PN14 : AArch64Reg<14, "pn14">, DwarfRegNum<[62]>;
808 def PN15 : AArch64Reg<15, "pn15">, DwarfRegNum<[63]>;
810 // SVE predicate registers
// Each Pn has the matching PNn as its psub sub-register and aliases the
// same DWARF number as that PNn.
811 let SubRegIndices = [psub] in {
812 def P0 : AArch64Reg<0, "p0", [PN0]>, DwarfRegAlias<PN0>;
813 def P1 : AArch64Reg<1, "p1", [PN1]>, DwarfRegAlias<PN1>;
814 def P2 : AArch64Reg<2, "p2", [PN2]>, DwarfRegAlias<PN2>;
815 def P3 : AArch64Reg<3, "p3", [PN3]>, DwarfRegAlias<PN3>;
816 def P4 : AArch64Reg<4, "p4", [PN4]>, DwarfRegAlias<PN4>;
817 def P5 : AArch64Reg<5, "p5", [PN5]>, DwarfRegAlias<PN5>;
818 def P6 : AArch64Reg<6, "p6", [PN6]>, DwarfRegAlias<PN6>;
819 def P7 : AArch64Reg<7, "p7", [PN7]>, DwarfRegAlias<PN7>;
820 def P8 : AArch64Reg<8, "p8", [PN8]>, DwarfRegAlias<PN8>;
821 def P9 : AArch64Reg<9, "p9", [PN9]>, DwarfRegAlias<PN9>;
822 def P10 : AArch64Reg<10, "p10", [PN10]>, DwarfRegAlias<PN10>;
823 def P11 : AArch64Reg<11, "p11", [PN11]>, DwarfRegAlias<PN11>;
824 def P12 : AArch64Reg<12, "p12", [PN12]>, DwarfRegAlias<PN12>;
825 def P13 : AArch64Reg<13, "p13", [PN13]>, DwarfRegAlias<PN13>;
826 def P14 : AArch64Reg<14, "p14", [PN14]>, DwarfRegAlias<PN14>;
827 def P15 : AArch64Reg<15, "p15", [PN15]>, DwarfRegAlias<PN15>;
830 // SVE variable-size vector registers
// Each Zn's low 128 bits alias the NEON Qn register via zsub; DWARF
// numbers 96-127 cover Z0-Z31.
831 let SubRegIndices = [zsub] in {
832 def Z0 : AArch64Reg<0, "z0", [Q0]>, DwarfRegNum<[96]>;
833 def Z1 : AArch64Reg<1, "z1", [Q1]>, DwarfRegNum<[97]>;
834 def Z2 : AArch64Reg<2, "z2", [Q2]>, DwarfRegNum<[98]>;
835 def Z3 : AArch64Reg<3, "z3", [Q3]>, DwarfRegNum<[99]>;
836 def Z4 : AArch64Reg<4, "z4", [Q4]>, DwarfRegNum<[100]>;
837 def Z5 : AArch64Reg<5, "z5", [Q5]>, DwarfRegNum<[101]>;
838 def Z6 : AArch64Reg<6, "z6", [Q6]>, DwarfRegNum<[102]>;
839 def Z7 : AArch64Reg<7, "z7", [Q7]>, DwarfRegNum<[103]>;
840 def Z8 : AArch64Reg<8, "z8", [Q8]>, DwarfRegNum<[104]>;
841 def Z9 : AArch64Reg<9, "z9", [Q9]>, DwarfRegNum<[105]>;
842 def Z10 : AArch64Reg<10, "z10", [Q10]>, DwarfRegNum<[106]>;
843 def Z11 : AArch64Reg<11, "z11", [Q11]>, DwarfRegNum<[107]>;
844 def Z12 : AArch64Reg<12, "z12", [Q12]>, DwarfRegNum<[108]>;
845 def Z13 : AArch64Reg<13, "z13", [Q13]>, DwarfRegNum<[109]>;
846 def Z14 : AArch64Reg<14, "z14", [Q14]>, DwarfRegNum<[110]>;
847 def Z15 : AArch64Reg<15, "z15", [Q15]>, DwarfRegNum<[111]>;
848 def Z16 : AArch64Reg<16, "z16", [Q16]>, DwarfRegNum<[112]>;
849 def Z17 : AArch64Reg<17, "z17", [Q17]>, DwarfRegNum<[113]>;
850 def Z18 : AArch64Reg<18, "z18", [Q18]>, DwarfRegNum<[114]>;
851 def Z19 : AArch64Reg<19, "z19", [Q19]>, DwarfRegNum<[115]>;
852 def Z20 : AArch64Reg<20, "z20", [Q20]>, DwarfRegNum<[116]>;
853 def Z21 : AArch64Reg<21, "z21", [Q21]>, DwarfRegNum<[117]>;
854 def Z22 : AArch64Reg<22, "z22", [Q22]>, DwarfRegNum<[118]>;
855 def Z23 : AArch64Reg<23, "z23", [Q23]>, DwarfRegNum<[119]>;
856 def Z24 : AArch64Reg<24, "z24", [Q24]>, DwarfRegNum<[120]>;
857 def Z25 : AArch64Reg<25, "z25", [Q25]>, DwarfRegNum<[121]>;
858 def Z26 : AArch64Reg<26, "z26", [Q26]>, DwarfRegNum<[122]>;
859 def Z27 : AArch64Reg<27, "z27", [Q27]>, DwarfRegNum<[123]>;
860 def Z28 : AArch64Reg<28, "z28", [Q28]>, DwarfRegNum<[124]>;
861 def Z29 : AArch64Reg<29, "z29", [Q29]>, DwarfRegNum<[125]>;
862 def Z30 : AArch64Reg<30, "z30", [Q30]>, DwarfRegNum<[126]>;
863 def Z31 : AArch64Reg<31, "z31", [Q31]>, DwarfRegNum<[127]>;
866 // Enum describing the element size for destructive
// 3-bit enum; values 0-5 map to None/B/H/S/D/Q.
868 class ElementSizeEnum<bits<3> val> {
872 def ElementSizeNone : ElementSizeEnum<0>;
873 def ElementSizeB : ElementSizeEnum<1>;
874 def ElementSizeH : ElementSizeEnum<2>;
875 def ElementSizeS : ElementSizeEnum<3>;
876 def ElementSizeD : ElementSizeEnum<4>;
877 def ElementSizeQ : ElementSizeEnum<5>; // Unused
// Common base for SVE register operands: records the element size and
// selects a suffixed print method unless the suffix is empty.
879 class SVERegOp <string Suffix, AsmOperandClass C,
880 ElementSizeEnum Size,
881 RegisterClass RC> : RegisterOperand<RC> {
882 ElementSizeEnum ElementSize;
884 let ElementSize = Size;
885 let PrintMethod = !if(!eq(Suffix, ""),
887 "printSVERegOp<'" # Suffix # "'>");
888 let ParserMatchClass = C;
// ZPR (SVE data vector) flavour of SVERegOp; adds nothing of its own.
891 class ZPRRegOp <string Suffix, AsmOperandClass C, ElementSizeEnum Size,
892 RegisterClass RC> : SVERegOp<Suffix, C, Size, RC> {}
894 //******************************************************************************
896 // SVE predicate register classes.
// PPRClass: contiguous range P<firstreg>..P<lastreg> of scalable
// predicate types.
897 class PPRClass<int firstreg, int lastreg> : RegisterClass<
899 [ nxv16i1, nxv8i1, nxv4i1, nxv2i1, nxv1i1 ], 16,
900 (sequence "P%u", firstreg, lastreg)> {
904 def PPR : PPRClass<0, 15>;
905 def PPR_3b : PPRClass<0, 7>; // Restricted 3 bit SVE predicate register class.
906 def PPR_p8to15 : PPRClass<8, 15>;
// Asm-operand class checking predicate-register membership and element
// width (Width == 0 means any width).
908 class PPRAsmOperand <string name, string RegClass, int Width>: AsmOperandClass {
909 let Name = "SVE" # name # "Reg";
910 let PredicateMethod = "isSVEPredicateVectorRegOfWidth<"
911 # Width # ", " # "AArch64::" # RegClass # "RegClassID>";
912 let DiagnosticType = "InvalidSVE" # name # "Reg";
913 let RenderMethod = "addRegOperands";
914 let ParserMethod = "tryParseSVEPredicateVector<RegKind::SVEPredicateVector>";
917 def PPRAsmOpAny : PPRAsmOperand<"PredicateAny", "PPR", 0>;
918 def PPRAsmOp8 : PPRAsmOperand<"PredicateB", "PPR", 8>;
919 def PPRAsmOp16 : PPRAsmOperand<"PredicateH", "PPR", 16>;
920 def PPRAsmOp32 : PPRAsmOperand<"PredicateS", "PPR", 32>;
921 def PPRAsmOp64 : PPRAsmOperand<"PredicateD", "PPR", 64>;
922 def PPRAsmOp3bAny : PPRAsmOperand<"Predicate3bAny", "PPR_3b", 0>;
924 class PPRRegOp <string Suffix, AsmOperandClass C, ElementSizeEnum Size,
925 RegisterClass RC> : SVERegOp<Suffix, C, Size, RC> {}
927 def PPRAny : PPRRegOp<"", PPRAsmOpAny, ElementSizeNone, PPR>;
928 def PPR8 : PPRRegOp<"b", PPRAsmOp8, ElementSizeB, PPR>;
929 def PPR16 : PPRRegOp<"h", PPRAsmOp16, ElementSizeH, PPR>;
930 def PPR32 : PPRRegOp<"s", PPRAsmOp32, ElementSizeS, PPR>;
931 def PPR64 : PPRRegOp<"d", PPRAsmOp64, ElementSizeD, PPR>;
932 def PPR3bAny : PPRRegOp<"", PPRAsmOp3bAny, ElementSizeNone, PPR_3b>;
// PNRClass: contiguous range PN<firstreg>..PN<lastreg> of the
// aarch64svcount (predicate-as-counter) type.
934 class PNRClass<int firstreg, int lastreg> : RegisterClass<
936 [ aarch64svcount ], 16,
937 (sequence "PN%u", firstreg, lastreg)> {
941 def PNR : PNRClass<0, 15>;
942 def PNR_3b : PNRClass<0, 7>;
943 def PNR_p8to15 : PNRClass<8, 15>;
945 // SVE predicate-as-counter operand
946 class PNRAsmOperand<string name, string RegClass, int Width>: AsmOperandClass {
947 let Name = "SVE" # name # "Reg";
948 let PredicateMethod = "isSVEPredicateAsCounterRegOfWidth<"
949 # Width # ", " # "AArch64::"
950 # RegClass # "RegClassID>";
951 let DiagnosticType = "InvalidSVE" # name # "Reg";
952 let RenderMethod = "addRegOperands";
953 let ParserMethod = "tryParseSVEPredicateVector<RegKind::SVEPredicateAsCounter>";
956 def PNRAsmOpAny: PNRAsmOperand<"PNPredicateAny", "PNR", 0>;
957 def PNRAsmOp8 : PNRAsmOperand<"PNPredicateB", "PNR", 8>;
958 def PNRAsmOp16 : PNRAsmOperand<"PNPredicateH", "PNR", 16>;
959 def PNRAsmOp32 : PNRAsmOperand<"PNPredicateS", "PNR", 32>;
960 def PNRAsmOp64 : PNRAsmOperand<"PNPredicateD", "PNR", 64>;
// PNR operands override the print method to the counter form; element
// size is not tracked (ElementSizeNone).
962 class PNRRegOp<string Suffix, AsmOperandClass C, int Size, RegisterClass RC>
963 : SVERegOp<Suffix, C, ElementSizeNone, RC> {
964 let PrintMethod = "printPredicateAsCounter<" # Size # ">";
966 def PNRAny : PNRRegOp<"", PNRAsmOpAny, 0, PNR>;
967 def PNR8 : PNRRegOp<"b", PNRAsmOp8, 8, PNR>;
968 def PNR16 : PNRRegOp<"h", PNRAsmOp16, 16, PNR>;
969 def PNR32 : PNRRegOp<"s", PNRAsmOp32, 32, PNR>;
970 def PNR64 : PNRRegOp<"d", PNRAsmOp64, 64, PNR>;
// Variants restricted to PN8-PN15 with custom encode/decode.
972 def PNRAsmAny_p8to15 : PNRAsmOperand<"PNPredicateAny_p8to15", "PNR_p8to15", 0>;
973 def PNRAsmOp8_p8to15 : PNRAsmOperand<"PNPredicateB_p8to15", "PNR_p8to15", 8>;
974 def PNRAsmOp16_p8to15 : PNRAsmOperand<"PNPredicateH_p8to15", "PNR_p8to15", 16>;
975 def PNRAsmOp32_p8to15 : PNRAsmOperand<"PNPredicateS_p8to15", "PNR_p8to15", 32>;
976 def PNRAsmOp64_p8to15 : PNRAsmOperand<"PNPredicateD_p8to15", "PNR_p8to15", 64>;
978 class PNRP8to15RegOp<string Suffix, AsmOperandClass C, int Width, RegisterClass RC>
979 : SVERegOp<Suffix, C, ElementSizeNone, RC> {
980 let PrintMethod = "printPredicateAsCounter<" # Width # ">";
981 let EncoderMethod = "EncodePNR_p8to15";
982 let DecoderMethod = "DecodePNR_p8to15RegisterClass";
985 def PNRAny_p8to15 : PNRP8to15RegOp<"", PNRAsmAny_p8to15, 0, PNR_p8to15>;
986 def PNR8_p8to15 : PNRP8to15RegOp<"b", PNRAsmOp8_p8to15, 8, PNR_p8to15>;
987 def PNR16_p8to15 : PNRP8to15RegOp<"h", PNRAsmOp16_p8to15, 16, PNR_p8to15>;
988 def PNR32_p8to15 : PNRP8to15RegOp<"s", PNRAsmOp32_p8to15, 32, PNR_p8to15>;
989 def PNR64_p8to15 : PNRP8to15RegOp<"d", PNRAsmOp64_p8to15, 64, PNR_p8to15>;
// Sub-register indices for predicate pairs; offset -1 means the offset
// is not representable (scalable registers).
991 let Namespace = "AArch64" in {
992 def psub0 : SubRegIndex<16, -1>;
993 def psub1 : SubRegIndex<16, -1>;
// Combined class accepting either a P (predicate vector) or PN
// (predicate-as-counter) view of the same registers.
996 class PPRorPNRClass : RegisterClass<
998 [ nxv16i1, nxv8i1, nxv4i1, nxv2i1, nxv1i1, aarch64svcount ], 16,
1003 class PPRorPNRAsmOperand<string name, string RegClass, int Width>: AsmOperandClass {
1004 let Name = "SVE" # name # "Reg";
1005 let PredicateMethod = "isSVEPredicateOrPredicateAsCounterRegOfWidth<"
1006 # Width # ", " # "AArch64::"
1007 # RegClass # "RegClassID>";
1008 let DiagnosticType = "InvalidSVE" # name # "Reg";
1009 let RenderMethod = "addPPRorPNRRegOperands";
1010 let ParserMethod = "tryParseSVEPredicateOrPredicateAsCounterVector";
1013 def PPRorPNR : PPRorPNRClass;
1014 def PPRorPNRAsmOp8 : PPRorPNRAsmOperand<"PPRorPNRB", "PPRorPNR", 8>;
1015 def PPRorPNRAsmOpAny : PPRorPNRAsmOperand<"PPRorPNRAny", "PPRorPNR", 0>;
1016 def PPRorPNRAny : PPRRegOp<"", PPRorPNRAsmOpAny, ElementSizeNone, PPRorPNR>;
1017 def PPRorPNR8 : PPRRegOp<"b", PPRorPNRAsmOp8, ElementSizeB, PPRorPNR>;
1019 // Pairs of SVE predicate vector registers.
// Consecutive pairs (Pn, Pn+1) including the wrapping P15_P0 via rotl.
1020 def PSeqPairs : RegisterTuples<[psub0, psub1], [(rotl PPR, 0), (rotl PPR, 1)]>;
1022 def PPR2 : RegisterClass<"AArch64", [untyped], 16, (add PSeqPairs)> {
// Asm-operand class for a list of NumRegs predicate registers with the
// given element width.
1026 class PPRVectorList<int ElementWidth, int NumRegs> : AsmOperandClass {
1027 let Name = "SVEPredicateList" # NumRegs # "x" # ElementWidth;
1028 let ParserMethod = "tryParseVectorList<RegKind::SVEPredicateVector>";
1029 let PredicateMethod = "isTypedVectorList<RegKind::SVEPredicateVector, "
1030 # NumRegs #", 0, "#ElementWidth #">";
1031 let RenderMethod = "addVectorListOperands<AArch64Operand::VecListIdx_PReg, "
1035 def PP_b : RegisterOperand<PPR2, "printTypedVectorList<0,'b'>"> {
1036 let ParserMatchClass = PPRVectorList<8, 2>;
1039 def PP_h : RegisterOperand<PPR2, "printTypedVectorList<0,'h'>"> {
1040 let ParserMatchClass = PPRVectorList<16, 2>;
1043 def PP_s : RegisterOperand<PPR2, "printTypedVectorList<0,'s'>"> {
1044 let ParserMatchClass = PPRVectorList<32, 2>;
1047 def PP_d : RegisterOperand<PPR2, "printTypedVectorList<0,'d'>"> {
1048 let ParserMatchClass = PPRVectorList<64, 2>;
1051 // SVE2 multiple-of-2 multi-predicate-vector operands
// decimate-by-2 keeps only even-started pairs (P0_P1, P2_P3, ...).
1052 def PPR2Mul2 : RegisterClass<"AArch64", [untyped], 16, (add (decimate PSeqPairs, 2))> {
1056 class PPRVectorListMul<int ElementWidth, int NumRegs> : PPRVectorList<ElementWidth, NumRegs> {
1057 let Name = "SVEPredicateListMul" # NumRegs # "x" # ElementWidth;
1058 let DiagnosticType = "Invalid" # Name;
1059 let PredicateMethod =
1060 "isTypedVectorListMultiple<RegKind::SVEPredicateVector, " # NumRegs # ", 0, "
1061 # ElementWidth # ">";
// The pair is encoded as its first register divided by 2.
1064 let EncoderMethod = "EncodeRegAsMultipleOf<2>",
1065 DecoderMethod = "DecodePPR2Mul2RegisterClass" in {
1066 def PP_b_mul_r : RegisterOperand<PPR2Mul2, "printTypedVectorList<0,'b'>"> {
1067 let ParserMatchClass = PPRVectorListMul<8, 2>;
1070 def PP_h_mul_r : RegisterOperand<PPR2Mul2, "printTypedVectorList<0,'h'>"> {
1071 let ParserMatchClass = PPRVectorListMul<16, 2>;
1074 def PP_s_mul_r : RegisterOperand<PPR2Mul2, "printTypedVectorList<0,'s'>"> {
1075 let ParserMatchClass = PPRVectorListMul<32, 2>;
1078 def PP_d_mul_r : RegisterOperand<PPR2Mul2, "printTypedVectorList<0,'d'>"> {
1079 let ParserMatchClass = PPRVectorListMul<64, 2>;
1081 } // end let EncoderMethod/DecoderMethod
1084 //******************************************************************************
1086 // SVE vector register classes
// ZPRClass: Z0..Z<lastreg> with the full set of scalable element types.
1087 class ZPRClass<int lastreg> : RegisterClass<"AArch64",
1088 [nxv16i8, nxv8i16, nxv4i32, nxv2i64,
1089 nxv2f16, nxv4f16, nxv8f16,
1090 nxv2bf16, nxv4bf16, nxv8bf16,
1093 128, (sequence "Z%u", 0, lastreg)> {
1097 def ZPR : ZPRClass<31>;
1098 def ZPR_4b : ZPRClass<15>; // Restricted 4 bit SVE vector register class.
1099 def ZPR_3b : ZPRClass<7>; // Restricted 3 bit SVE vector register class.
// Asm-operand class for an SVE data vector of a given element width;
// RegClassSuffix selects ZPR / ZPR_3b / ZPR_4b.
1101 class ZPRAsmOperand<string name, int Width, string RegClassSuffix = "">
1103 let Name = "SVE" # name # "Reg";
1104 let PredicateMethod = "isSVEDataVectorRegOfWidth<"
1105 # Width # ", AArch64::ZPR"
1106 # RegClassSuffix # "RegClassID>";
1107 let RenderMethod = "addRegOperands";
1108 let DiagnosticType = "InvalidZPR" # RegClassSuffix # Width;
1109 let ParserMethod = "tryParseSVEDataVector<false, "
1110 # !if(!eq(Width, 0), "false", "true") # ">";
1113 def ZPRAsmOpAny : ZPRAsmOperand<"VectorAny", 0>;
1114 def ZPRAsmOp8 : ZPRAsmOperand<"VectorB", 8>;
1115 def ZPRAsmOp16 : ZPRAsmOperand<"VectorH", 16>;
1116 def ZPRAsmOp32 : ZPRAsmOperand<"VectorS", 32>;
1117 def ZPRAsmOp64 : ZPRAsmOperand<"VectorD", 64>;
1118 def ZPRAsmOp128 : ZPRAsmOperand<"VectorQ", 128>;
1120 def ZPRAny : ZPRRegOp<"", ZPRAsmOpAny, ElementSizeNone, ZPR>;
1121 def ZPR8 : ZPRRegOp<"b", ZPRAsmOp8, ElementSizeB, ZPR>;
1122 def ZPR16 : ZPRRegOp<"h", ZPRAsmOp16, ElementSizeH, ZPR>;
1123 def ZPR32 : ZPRRegOp<"s", ZPRAsmOp32, ElementSizeS, ZPR>;
1124 def ZPR64 : ZPRRegOp<"d", ZPRAsmOp64, ElementSizeD, ZPR>;
1125 def ZPR128 : ZPRRegOp<"q", ZPRAsmOp128, ElementSizeQ, ZPR>;
1127 def ZPRAsmOp3b8 : ZPRAsmOperand<"Vector3bB", 8, "_3b">;
1128 def ZPRAsmOp3b16 : ZPRAsmOperand<"Vector3bH", 16, "_3b">;
1129 def ZPRAsmOp3b32 : ZPRAsmOperand<"Vector3bS", 32, "_3b">;
1131 def ZPR3b8 : ZPRRegOp<"b", ZPRAsmOp3b8, ElementSizeB, ZPR_3b>;
1132 def ZPR3b16 : ZPRRegOp<"h", ZPRAsmOp3b16, ElementSizeH, ZPR_3b>;
1133 def ZPR3b32 : ZPRRegOp<"s", ZPRAsmOp3b32, ElementSizeS, ZPR_3b>;
1135 def ZPRAsmOp4b8 : ZPRAsmOperand<"Vector4bB", 8, "_4b">;
1136 def ZPRAsmOp4b16 : ZPRAsmOperand<"Vector4bH", 16, "_4b">;
1137 def ZPRAsmOp4b32 : ZPRAsmOperand<"Vector4bS", 32, "_4b">;
1138 def ZPRAsmOp4b64 : ZPRAsmOperand<"Vector4bD", 64, "_4b">;
1140 def ZPR4b8 : ZPRRegOp<"b", ZPRAsmOp4b8, ElementSizeB, ZPR_4b>;
1141 def ZPR4b16 : ZPRRegOp<"h", ZPRAsmOp4b16, ElementSizeH, ZPR_4b>;
1142 def ZPR4b32 : ZPRRegOp<"s", ZPRAsmOp4b32, ElementSizeS, ZPR_4b>;
1143 def ZPR4b64 : ZPRRegOp<"d", ZPRAsmOp4b64, ElementSizeD, ZPR_4b>;
// Operands that accept an FPR-named scalar (e.g. d0) but encode/print it
// as the containing Z register.
1145 class FPRasZPR<int Width> : AsmOperandClass{
1146 let Name = "FPR" # Width # "asZPR";
1147 let PredicateMethod = "isFPRasZPR<AArch64::FPR" # Width # "RegClassID>";
1148 let RenderMethod = "addFPRasZPRRegOperands<" # Width # ">";
1151 class FPRasZPROperand<int Width> : RegisterOperand<ZPR> {
1152 let ParserMatchClass = FPRasZPR<Width>;
1153 let PrintMethod = "printZPRasFPR<" # Width # ">";
1156 def FPR8asZPR : FPRasZPROperand<8>;
1157 def FPR16asZPR : FPRasZPROperand<16>;
1158 def FPR32asZPR : FPRasZPROperand<32>;
1159 def FPR64asZPR : FPRasZPROperand<64>;
1160 def FPR128asZPR : FPRasZPROperand<128>;
// Sub-register indices for Z-register tuples; offset -1 = not
// representable (scalable registers).
1162 let Namespace = "AArch64" in {
1163 def zsub0 : SubRegIndex<128, -1>;
1164 def zsub1 : SubRegIndex<128, -1>;
1165 def zsub2 : SubRegIndex<128, -1>;
1166 def zsub3 : SubRegIndex<128, -1>;
1169 // Pairs, triples, and quads of SVE vector registers.
// rotl produces consecutive (wrapping) register numbers within each tuple.
1170 def ZSeqPairs : RegisterTuples<[zsub0, zsub1], [(rotl ZPR, 0), (rotl ZPR, 1)]>;
1171 def ZSeqTriples : RegisterTuples<[zsub0, zsub1, zsub2], [(rotl ZPR, 0), (rotl ZPR, 1), (rotl ZPR, 2)]>;
1172 def ZSeqQuads : RegisterTuples<[zsub0, zsub1, zsub2, zsub3], [(rotl ZPR, 0), (rotl ZPR, 1), (rotl ZPR, 2), (rotl ZPR, 3)]>;
1174 def ZPR2 : RegisterClass<"AArch64", [untyped], 128, (add ZSeqPairs)> {
1177 def ZPR3 : RegisterClass<"AArch64", [untyped], 128, (add ZSeqTriples)> {
1180 def ZPR4 : RegisterClass<"AArch64", [untyped], 128, (add ZSeqQuads)> {
// Asm-operand class for a list of NumRegs SVE data vectors of the given
// element width.
1184 class ZPRVectorList<int ElementWidth, int NumRegs> : AsmOperandClass {
1185 let Name = "SVEVectorList" # NumRegs # ElementWidth;
1186 let ParserMethod = "tryParseVectorList<RegKind::SVEDataVector>";
1187 let PredicateMethod =
1188 "isTypedVectorList<RegKind::SVEDataVector, " #NumRegs #", 0, " #ElementWidth #">";
1189 let RenderMethod = "addVectorListOperands<AArch64Operand::VecListIdx_ZReg, " # NumRegs # ">";
// Single-, two-, three- and four-register Z vector list operands per
// element suffix.
1192 def Z_b : RegisterOperand<ZPR, "printTypedVectorList<0,'b'>"> {
1193 let ParserMatchClass = ZPRVectorList<8, 1>;
1196 def Z_h : RegisterOperand<ZPR, "printTypedVectorList<0,'h'>"> {
1197 let ParserMatchClass = ZPRVectorList<16, 1>;
1200 def Z_s : RegisterOperand<ZPR, "printTypedVectorList<0,'s'>"> {
1201 let ParserMatchClass = ZPRVectorList<32, 1>;
1204 def Z_d : RegisterOperand<ZPR, "printTypedVectorList<0,'d'>"> {
1205 let ParserMatchClass = ZPRVectorList<64, 1>;
1208 def Z_q : RegisterOperand<ZPR, "printTypedVectorList<0,'q'>"> {
1209 let ParserMatchClass = ZPRVectorList<128, 1>;
1212 def ZZ_b : RegisterOperand<ZPR2, "printTypedVectorList<0,'b'>"> {
1213 let ParserMatchClass = ZPRVectorList<8, 2>;
1216 def ZZ_h : RegisterOperand<ZPR2, "printTypedVectorList<0,'h'>"> {
1217 let ParserMatchClass = ZPRVectorList<16, 2>;
1220 def ZZ_s : RegisterOperand<ZPR2, "printTypedVectorList<0,'s'>"> {
1221 let ParserMatchClass = ZPRVectorList<32, 2>;
1224 def ZZ_d : RegisterOperand<ZPR2, "printTypedVectorList<0,'d'>"> {
1225 let ParserMatchClass = ZPRVectorList<64, 2>;
1228 def ZZ_q : RegisterOperand<ZPR2, "printTypedVectorList<0,'q'>"> {
1229 let ParserMatchClass = ZPRVectorList<128, 2>;
1232 def ZZZ_b : RegisterOperand<ZPR3, "printTypedVectorList<0,'b'>"> {
1233 let ParserMatchClass = ZPRVectorList<8, 3>;
1236 def ZZZ_h : RegisterOperand<ZPR3, "printTypedVectorList<0,'h'>"> {
1237 let ParserMatchClass = ZPRVectorList<16, 3>;
1240 def ZZZ_s : RegisterOperand<ZPR3, "printTypedVectorList<0,'s'>"> {
1241 let ParserMatchClass = ZPRVectorList<32, 3>;
1244 def ZZZ_d : RegisterOperand<ZPR3, "printTypedVectorList<0,'d'>"> {
1245 let ParserMatchClass = ZPRVectorList<64, 3>;
1248 def ZZZ_q : RegisterOperand<ZPR3, "printTypedVectorList<0,'q'>"> {
1249 let ParserMatchClass = ZPRVectorList<128, 3>;
1252 def ZZZZ_b : RegisterOperand<ZPR4, "printTypedVectorList<0,'b'>"> {
1253 let ParserMatchClass = ZPRVectorList<8, 4>;
1256 def ZZZZ_h : RegisterOperand<ZPR4, "printTypedVectorList<0,'h'>"> {
1257 let ParserMatchClass = ZPRVectorList<16, 4>;
1260 def ZZZZ_s : RegisterOperand<ZPR4, "printTypedVectorList<0,'s'>"> {
1261 let ParserMatchClass = ZPRVectorList<32, 4>;
1264 def ZZZZ_d : RegisterOperand<ZPR4, "printTypedVectorList<0,'d'>"> {
1265 let ParserMatchClass = ZPRVectorList<64, 4>;
1268 def ZZZZ_q : RegisterOperand<ZPR4, "printTypedVectorList<0,'q'>"> {
1269 let ParserMatchClass = ZPRVectorList<128, 4>;
1272 // SME2 multiple-of-2 or 4 multi-vector operands
// decimate keeps only tuples whose first register is a multiple of the
// tuple size (Z0_Z1, Z2_Z3, ... / Z0..Z3, Z4..Z7, ...).
1273 def ZPR2Mul2 : RegisterClass<"AArch64", [untyped], 128, (add (decimate ZSeqPairs, 2))> {
1277 def ZPR4Mul4 : RegisterClass<"AArch64", [untyped], 128, (add (decimate ZSeqQuads, 4))> {
1281 class ZPRVectorListMul<int ElementWidth, int NumRegs> : ZPRVectorList<ElementWidth, NumRegs> {
1282 let Name = "SVEVectorListMul" # NumRegs # "x" # ElementWidth;
1283 let DiagnosticType = "Invalid" # Name;
1284 let PredicateMethod =
1285 "isTypedVectorListMultiple<RegKind::SVEDataVector, " # NumRegs # ", 0, "
1286 # ElementWidth # ">";
// Pairs: encoded as first register / 2.
1289 let EncoderMethod = "EncodeRegAsMultipleOf<2>",
1290 DecoderMethod = "DecodeZPR2Mul2RegisterClass" in {
1291 def ZZ_mul_r : RegisterOperand<ZPR2Mul2, "printTypedVectorList<0,0>"> {
1292 let ParserMatchClass = ZPRVectorListMul<0, 2>;
1295 def ZZ_b_mul_r : RegisterOperand<ZPR2Mul2, "printTypedVectorList<0,'b'>"> {
1296 let ParserMatchClass = ZPRVectorListMul<8, 2>;
1299 def ZZ_h_mul_r : RegisterOperand<ZPR2Mul2, "printTypedVectorList<0,'h'>"> {
1300 let ParserMatchClass = ZPRVectorListMul<16, 2>;
1303 def ZZ_s_mul_r : RegisterOperand<ZPR2Mul2, "printTypedVectorList<0,'s'>"> {
1304 let ParserMatchClass = ZPRVectorListMul<32, 2>;
1307 def ZZ_d_mul_r : RegisterOperand<ZPR2Mul2, "printTypedVectorList<0,'d'>"> {
1308 let ParserMatchClass = ZPRVectorListMul<64, 2>;
1311 def ZZ_q_mul_r : RegisterOperand<ZPR2Mul2, "printTypedVectorList<0,'q'>"> {
1312 let ParserMatchClass = ZPRVectorListMul<128, 2>;
1314 } // end let EncoderMethod/DecoderMethod
// Quads: encoded as first register / 4.
1316 let EncoderMethod = "EncodeRegAsMultipleOf<4>",
1317 DecoderMethod = "DecodeZPR4Mul4RegisterClass" in {
1318 def ZZZZ_b_mul_r : RegisterOperand<ZPR4Mul4, "printTypedVectorList<0,'b'>"> {
1319 let ParserMatchClass = ZPRVectorListMul<8, 4>;
1322 def ZZZZ_h_mul_r : RegisterOperand<ZPR4Mul4, "printTypedVectorList<0,'h'>"> {
1323 let ParserMatchClass = ZPRVectorListMul<16, 4>;
1326 def ZZZZ_s_mul_r : RegisterOperand<ZPR4Mul4, "printTypedVectorList<0,'s'>"> {
1327 let ParserMatchClass = ZPRVectorListMul<32, 4>;
1330 def ZZZZ_d_mul_r : RegisterOperand<ZPR4Mul4, "printTypedVectorList<0,'d'>"> {
1331 let ParserMatchClass = ZPRVectorListMul<64, 4>;
1334 def ZZZZ_q_mul_r : RegisterOperand<ZPR4Mul4, "printTypedVectorList<0,'q'>"> {
1335 let ParserMatchClass = ZPRVectorListMul<128, 4>;
1337 } // end let EncoderMethod/DecoderMethod
1339 // SME2 strided multi-vector operands
1343 // A group of two Z vectors with strided numbering consisting of:
1344 // Zn+0.T and Zn+8.T
1345 // where n is in the range 0 to 7 and 16 to 23 inclusive, and T is one of B, H,
1348 // Z0_Z8, Z1_Z9, Z2_Z10, Z3_Z11, Z4_Z12, Z5_Z13, Z6_Z14, Z7_Z15
1349 def ZStridedPairsLo : RegisterTuples<[zsub0, zsub1], [
1350 (trunc (rotl ZPR, 0), 8), (trunc (rotl ZPR, 8), 8)
1353 // Z16_Z24, Z17_Z25, Z18_Z26, Z19_Z27, Z20_Z28, Z21_Z29, Z22_Z30, Z23_Z31
1354 def ZStridedPairsHi : RegisterTuples<[zsub0, zsub1], [
1355 (trunc (rotl ZPR, 16), 8), (trunc (rotl ZPR, 24), 8)
1360 // A group of four Z vectors with strided numbering consisting of:
1361 // Zn+0.T, Zn+4.T, Zn+8.T and Zn+12.T
1362 // where n is in the range 0 to 3 and 16 to 19 inclusive, and T is one of B, H,
1365 // Z0_Z4_Z8_Z12, Z1_Z5_Z9_Z13, Z2_Z6_Z10_Z14, Z3_Z7_Z11_Z15
1366 def ZStridedQuadsLo : RegisterTuples<[zsub0, zsub1, zsub2, zsub3], [
1367 (trunc (rotl ZPR, 0), 4), (trunc (rotl ZPR, 4), 4),
1368 (trunc (rotl ZPR, 8), 4), (trunc (rotl ZPR, 12), 4)
1370 // Z16_Z20_Z24_Z28, Z17_Z21_Z25_Z29, Z18_Z22_Z26_Z30, Z19_Z23_Z27_Z31
1371 def ZStridedQuadsHi : RegisterTuples<[zsub0, zsub1, zsub2, zsub3], [
1372 (trunc (rotl ZPR, 16), 4), (trunc (rotl ZPR, 20), 4),
1373 (trunc (rotl ZPR, 24), 4), (trunc (rotl ZPR, 28), 4)
// Register classes over the strided tuples, optionally merged with the
// contiguous multiple-of-N tuples.
1376 def ZPR2Strided : RegisterClass<"AArch64", [untyped], 128,
1377 (add ZStridedPairsLo, ZStridedPairsHi)> {
1380 def ZPR4Strided : RegisterClass<"AArch64", [untyped], 128,
1381 (add ZStridedQuadsLo, ZStridedQuadsHi)> {
1385 def ZPR2StridedOrContiguous : RegisterClass<"AArch64", [untyped], 128,
1386 (add ZStridedPairsLo, ZStridedPairsHi,
1387 (decimate ZSeqPairs, 2))> {
1391 class ZPRVectorListStrided<int ElementWidth, int NumRegs, int Stride>
1392 : ZPRVectorList<ElementWidth, NumRegs> {
1393 let Name = "SVEVectorListStrided" # NumRegs # "x" # ElementWidth;
1394 let DiagnosticType = "Invalid" # Name;
1395 let PredicateMethod = "isTypedVectorListStrided<RegKind::SVEDataVector, "
1396 # NumRegs # "," # Stride # "," # ElementWidth # ">";
1397 let RenderMethod = "addStridedVectorListOperands<" # NumRegs # ">";
// Strided-pair operands (stride 8); operand names fall on elided lines.
1400 let EncoderMethod = "EncodeZPR2StridedRegisterClass",
1401 DecoderMethod = "DecodeZPR2StridedRegisterClass" in {
1403 : RegisterOperand<ZPR2Strided, "printTypedVectorList<0, 'b'>"> {
1404 let ParserMatchClass = ZPRVectorListStrided<8, 2, 8>;
1408 : RegisterOperand<ZPR2Strided, "printTypedVectorList<0, 'h'>"> {
1409 let ParserMatchClass = ZPRVectorListStrided<16, 2, 8>;
1413 : RegisterOperand<ZPR2Strided, "printTypedVectorList<0,'s'>"> {
1414 let ParserMatchClass = ZPRVectorListStrided<32, 2, 8>;
1418 : RegisterOperand<ZPR2Strided, "printTypedVectorList<0,'d'>"> {
1419 let ParserMatchClass = ZPRVectorListStrided<64, 2, 8>;
1422 def ZZ_b_strided_and_contiguous
1423 : RegisterOperand<ZPR2StridedOrContiguous, "printTypedVectorList<0,'b'>">;
1424 def ZZ_h_strided_and_contiguous
1425 : RegisterOperand<ZPR2StridedOrContiguous, "printTypedVectorList<0,'h'>">;
1426 def ZZ_s_strided_and_contiguous
1427 : RegisterOperand<ZPR2StridedOrContiguous, "printTypedVectorList<0,'s'>">;
1428 def ZZ_d_strided_and_contiguous
1429 : RegisterOperand<ZPR2StridedOrContiguous, "printTypedVectorList<0,'d'>">;
1432 def ZPR4StridedOrContiguous : RegisterClass<"AArch64", [untyped], 128,
1433 (add ZStridedQuadsLo, ZStridedQuadsHi,
1434 (decimate ZSeqQuads, 4))> {
// Strided-quad operands (stride 4); operand names fall on elided lines.
1438 let EncoderMethod = "EncodeZPR4StridedRegisterClass",
1439 DecoderMethod = "DecodeZPR4StridedRegisterClass" in {
1441 : RegisterOperand<ZPR4Strided, "printTypedVectorList<0,'b'>"> {
1442 let ParserMatchClass = ZPRVectorListStrided<8, 4, 4>;
1446 : RegisterOperand<ZPR4Strided, "printTypedVectorList<0,'h'>"> {
1447 let ParserMatchClass = ZPRVectorListStrided<16, 4, 4>;
1451 : RegisterOperand<ZPR4Strided, "printTypedVectorList<0,'s'>"> {
1452 let ParserMatchClass = ZPRVectorListStrided<32, 4, 4>;
1456 : RegisterOperand<ZPR4Strided, "printTypedVectorList<0,'d'>"> {
1457 let ParserMatchClass = ZPRVectorListStrided<64, 4, 4>;
1460 def ZZZZ_b_strided_and_contiguous
1461 : RegisterOperand<ZPR4StridedOrContiguous, "printTypedVectorList<0,'b'>">;
1462 def ZZZZ_h_strided_and_contiguous
1463 : RegisterOperand<ZPR4StridedOrContiguous, "printTypedVectorList<0,'h'>">;
1464 def ZZZZ_s_strided_and_contiguous
1465 : RegisterOperand<ZPR4StridedOrContiguous, "printTypedVectorList<0,'s'>">;
1466 def ZZZZ_d_strided_and_contiguous
1467 : RegisterOperand<ZPR4StridedOrContiguous, "printTypedVectorList<0,'d'>">;
// Asm-operand class for an SVE data vector used as a scaled, extended
// offset register (UXTW/SXTW/LSL with scale 8/16/32/64).
1470 class ZPRExtendAsmOperand<string ShiftExtend, int RegWidth, int Scale,
1471 bit ScaleAlwaysSame = 0b0> : AsmOperandClass {
1472 let Name = "ZPRExtend" # ShiftExtend # RegWidth # Scale
1473 # !if(ScaleAlwaysSame, "Only", "");
1475 let PredicateMethod = "isSVEDataVectorRegWithShiftExtend<"
1476 # RegWidth # ", AArch64::ZPRRegClassID, "
1477 # "AArch64_AM::" # ShiftExtend # ", "
1479 # !if(ScaleAlwaysSame, "true", "false")
1481 let DiagnosticType = "InvalidZPR" # RegWidth # ShiftExtend # Scale;
1482 let RenderMethod = "addRegOperands";
1483 let ParserMethod = "tryParseSVEDataVector<true, true>";
// Register operand tying a ZPR to the matching ZPRExtendAsmOperand
// (located by constructed name) and a shift/extend-aware printer.
1486 class ZPRExtendRegisterOperand<bit SignExtend, bit IsLSL, string Repr,
1487 int RegWidth, int Scale, string Suffix = "">
1488 : RegisterOperand<ZPR> {
1489 let ParserMatchClass =
1490 !cast<AsmOperandClass>("ZPR" # RegWidth # "AsmOpndExt" # Repr # Scale # Suffix);
1491 let PrintMethod = "printRegWithShiftExtend<"
1492 # !if(SignExtend, "true", "false") # ", "
1494 # !if(IsLSL, "'x'", "'w'") # ", "
1495 # !if(!eq(RegWidth, 32), "'s'", "'d'") # ">";
// Instantiate every extend kind x scale for 32- and 64-bit element widths.
1498 foreach RegWidth = [32, 64] in {
1500 def ZPR#RegWidth#AsmOpndExtUXTW8Only : ZPRExtendAsmOperand<"UXTW", RegWidth, 8, 0b1>;
1501 def ZPR#RegWidth#AsmOpndExtUXTW8 : ZPRExtendAsmOperand<"UXTW", RegWidth, 8>;
1502 def ZPR#RegWidth#AsmOpndExtUXTW16 : ZPRExtendAsmOperand<"UXTW", RegWidth, 16>;
1503 def ZPR#RegWidth#AsmOpndExtUXTW32 : ZPRExtendAsmOperand<"UXTW", RegWidth, 32>;
1504 def ZPR#RegWidth#AsmOpndExtUXTW64 : ZPRExtendAsmOperand<"UXTW", RegWidth, 64>;
1506 def ZPR#RegWidth#ExtUXTW8Only : ZPRExtendRegisterOperand<0b0, 0b0, "UXTW", RegWidth, 8, "Only">;
1507 def ZPR#RegWidth#ExtUXTW8 : ZPRExtendRegisterOperand<0b0, 0b0, "UXTW", RegWidth, 8>;
1508 def ZPR#RegWidth#ExtUXTW16 : ZPRExtendRegisterOperand<0b0, 0b0, "UXTW", RegWidth, 16>;
1509 def ZPR#RegWidth#ExtUXTW32 : ZPRExtendRegisterOperand<0b0, 0b0, "UXTW", RegWidth, 32>;
1510 def ZPR#RegWidth#ExtUXTW64 : ZPRExtendRegisterOperand<0b0, 0b0, "UXTW", RegWidth, 64>;
1513 def ZPR#RegWidth#AsmOpndExtSXTW8Only : ZPRExtendAsmOperand<"SXTW", RegWidth, 8, 0b1>;
1514 def ZPR#RegWidth#AsmOpndExtSXTW8 : ZPRExtendAsmOperand<"SXTW", RegWidth, 8>;
1515 def ZPR#RegWidth#AsmOpndExtSXTW16 : ZPRExtendAsmOperand<"SXTW", RegWidth, 16>;
1516 def ZPR#RegWidth#AsmOpndExtSXTW32 : ZPRExtendAsmOperand<"SXTW", RegWidth, 32>;
1517 def ZPR#RegWidth#AsmOpndExtSXTW64 : ZPRExtendAsmOperand<"SXTW", RegWidth, 64>;
1519 def ZPR#RegWidth#ExtSXTW8Only : ZPRExtendRegisterOperand<0b1, 0b0, "SXTW", RegWidth, 8, "Only">;
1520 def ZPR#RegWidth#ExtSXTW8 : ZPRExtendRegisterOperand<0b1, 0b0, "SXTW", RegWidth, 8>;
1521 def ZPR#RegWidth#ExtSXTW16 : ZPRExtendRegisterOperand<0b1, 0b0, "SXTW", RegWidth, 16>;
1522 def ZPR#RegWidth#ExtSXTW32 : ZPRExtendRegisterOperand<0b1, 0b0, "SXTW", RegWidth, 32>;
1523 def ZPR#RegWidth#ExtSXTW64 : ZPRExtendRegisterOperand<0b1, 0b0, "SXTW", RegWidth, 64>;
1526 def ZPR#RegWidth#AsmOpndExtLSL8 : ZPRExtendAsmOperand<"LSL", RegWidth, 8>;
1527 def ZPR#RegWidth#AsmOpndExtLSL16 : ZPRExtendAsmOperand<"LSL", RegWidth, 16>;
1528 def ZPR#RegWidth#AsmOpndExtLSL32 : ZPRExtendAsmOperand<"LSL", RegWidth, 32>;
1529 def ZPR#RegWidth#AsmOpndExtLSL64 : ZPRExtendAsmOperand<"LSL", RegWidth, 64>;
1530 def ZPR#RegWidth#ExtLSL8 : ZPRExtendRegisterOperand<0b0, 0b1, "LSL", RegWidth, 8>;
1531 def ZPR#RegWidth#ExtLSL16 : ZPRExtendRegisterOperand<0b0, 0b1, "LSL", RegWidth, 16>;
1532 def ZPR#RegWidth#ExtLSL32 : ZPRExtendRegisterOperand<0b0, 0b1, "LSL", RegWidth, 32>;
1533 def ZPR#RegWidth#ExtLSL64 : ZPRExtendRegisterOperand<0b0, 0b1, "LSL", RegWidth, 64>;
// Asm-operand class for a 64-bit GPR used as a shifted offset register,
// scaled by a fixed element size. Matching is delegated to
// isGPR64WithShiftExtend, templated on the concrete register class and scale;
// parsing accepts an optional shift/extend suffix (tryParseGPROperand<true>).
1536 class GPR64ShiftExtendAsmOperand <string AsmOperandName, int Scale, string RegClass> : AsmOperandClass {
// Name/diagnostic are derived from the operand name plus scale, e.g.
// "GPR64shifted8" -> "InvalidGPR64shifted8".
1537 let Name = AsmOperandName # Scale;
1538 let PredicateMethod = "isGPR64WithShiftExtend<AArch64::"#RegClass#"RegClassID, " # Scale # ">";
1539 let DiagnosticType = "Invalid" # AsmOperandName # Scale;
1540 let RenderMethod = "addRegOperands";
1541 let ParserMethod = "tryParseGPROperand<true>";
// Register operand tying a GPR64 register class to one of the asm-operand
// classes above (looked up by name via !cast). Printing always uses the
// 'x' register form with the given scale.
1544 class GPR64ExtendRegisterOperand<string Name, int Scale, RegisterClass RegClass> : RegisterOperand<RegClass>{
1545 let ParserMatchClass = !cast<AsmOperandClass>(Name);
1546 let PrintMethod = "printRegWithShiftExtend<false, " # Scale # ", 'x', 0>";
// Instantiate shifted-GPR64 operands for every supported scale. Two
// flavours: GPR64 (includes XZR) and GPR64common (excludes XZR, hence
// the "NoXZR" naming).
1549 foreach Scale = [8, 16, 32, 64, 128] in {
1550 def GPR64shiftedAsmOpnd # Scale : GPR64ShiftExtendAsmOperand<"GPR64shifted", Scale, "GPR64">;
1551 def GPR64shifted # Scale : GPR64ExtendRegisterOperand<"GPR64shiftedAsmOpnd" # Scale, Scale, GPR64>;
1553 def GPR64NoXZRshiftedAsmOpnd # Scale : GPR64ShiftExtendAsmOperand<"GPR64NoXZRshifted", Scale, "GPR64common">;
1554 def GPR64NoXZRshifted # Scale : GPR64ExtendRegisterOperand<"GPR64NoXZRshiftedAsmOpnd" # Scale, Scale, GPR64common>;
// Accumulator array tiles: the sixteen 128-bit-element (.q) tiles of the
// SME ZA array. Encoding equals the tile number; asm name is "za<N>.q".
foreach i = 0-15 in
  def ZAQ#i : AArch64Reg<i, "za"#i#".q">;
// 64-bit-element (.d) tiles. Each ZAD<n> aggregates the two 128-bit tiles
// ZAQ<n> and ZAQ<n+8> via the zasubq0/zasubq1 sub-register indices.
1575 let SubRegIndices = [zasubq0, zasubq1] in {
1576 def ZAD0 : AArch64Reg<0, "za0.d", [ZAQ0, ZAQ8]>;
1577 def ZAD1 : AArch64Reg<1, "za1.d", [ZAQ1, ZAQ9]>;
1578 def ZAD2 : AArch64Reg<2, "za2.d", [ZAQ2, ZAQ10]>;
1579 def ZAD3 : AArch64Reg<3, "za3.d", [ZAQ3, ZAQ11]>;
1580 def ZAD4 : AArch64Reg<4, "za4.d", [ZAQ4, ZAQ12]>;
1581 def ZAD5 : AArch64Reg<5, "za5.d", [ZAQ5, ZAQ13]>;
1582 def ZAD6 : AArch64Reg<6, "za6.d", [ZAQ6, ZAQ14]>;
1583 def ZAD7 : AArch64Reg<7, "za7.d", [ZAQ7, ZAQ15]>;
// 32-bit-element (.s) tiles. Each ZAS<n> aggregates ZAD<n> and ZAD<n+4>
// via zasubd0/zasubd1.
1586 let SubRegIndices = [zasubd0, zasubd1] in {
1587 def ZAS0 : AArch64Reg<0, "za0.s", [ZAD0, ZAD4]>;
1588 def ZAS1 : AArch64Reg<1, "za1.s", [ZAD1, ZAD5]>;
1589 def ZAS2 : AArch64Reg<2, "za2.s", [ZAD2, ZAD6]>;
1590 def ZAS3 : AArch64Reg<3, "za3.s", [ZAD3, ZAD7]>;
// 16-bit-element (.h) tiles. Each ZAH<n> aggregates ZAS<n> and ZAS<n+2>
// via zasubs0/zasubs1.
1593 let SubRegIndices = [zasubs0, zasubs1] in {
1594 def ZAH0 : AArch64Reg<0, "za0.h", [ZAS0, ZAS2]>;
1595 def ZAH1 : AArch64Reg<1, "za1.h", [ZAS1, ZAS3]>;
// 8-bit-element (.b) tile: the single ZAB0 covers the whole array via
// the two halves ZAH0/ZAH1 (zasubh0/zasubh1).
1598 let SubRegIndices = [zasubh0, zasubh1] in {
1599 def ZAB0 : AArch64Reg<0, "za0.b", [ZAH0, ZAH1]>;
// The full accumulator array register ZA, with ZAB0 as its only
// sub-register (zasubb).
1602 let SubRegIndices = [zasubb] in {
1603 def ZA : AArch64Reg<0, "za", [ZAB0]>;
// ZT0 — standalone register with asm name "zt0"; the ZTR class below tags
// it with the "InvalidLookupTable" diagnostic (SME2 lookup-table register —
// NOTE(review): confirm against the SME2 spec).
1606 def ZT0 : AArch64Reg<0, "zt0">;
1608 // SME Register Classes
// None of these are register-allocatable; they exist for operand
// matching/printing and for describing the ZA tile structure.
1610 let isAllocatable = 0 in {
1611 // Accumulator array
1612 def MPR : RegisterClass<"AArch64", [untyped], 2048, (add ZA)> {
1616 // Accumulator array as single tiles
// Spill size halves as the tile count doubles: 1 x .b tile (2048 bits)
// down to 16 x .q tiles (128 bits each).
1617 def MPR8 : RegisterClass<"AArch64", [untyped], 2048, (add (sequence "ZAB%u", 0, 0))> {
1620 def MPR16 : RegisterClass<"AArch64", [untyped], 1024, (add (sequence "ZAH%u", 0, 1))> {
1623 def MPR32 : RegisterClass<"AArch64", [untyped], 512, (add (sequence "ZAS%u", 0, 3))> {
1626 def MPR64 : RegisterClass<"AArch64", [untyped], 256, (add (sequence "ZAD%u", 0, 7))> {
1629 def MPR128 : RegisterClass<"AArch64", [untyped], 128, (add (sequence "ZAQ%u", 0, 15))> {
// Singleton class holding ZT0 so the asm matcher can report a dedicated
// diagnostic for it.
1634 def ZTR : RegisterClass<"AArch64", [untyped], 512, (add ZT0)> {
1636 let DiagnosticType = "InvalidLookupTable";
1638 // SME Register Operands
1639 // There are three types of SME matrix register operands:
1642 // These tiles make up the larger accumulator matrix. The tile representation
1643 // has an element type suffix, e.g. za0.b or za15.q and can be any of the
1653 // Their representation is similar to regular tiles, but they have an extra
1654 // 'h' or 'v' to tell how the vector at [reg+offset] is laid out in the tile,
1655 // horizontally or vertically.
1657 // e.g. za1h.h or za15v.q, which corresponds to vectors in registers ZAH1 and
1658 // ZAQ15, respectively. The horizontal/vertical is more a property of the
1659 // instruction, than a property of the asm-operand itself, or its register.
1660 // The distinction is required for the parsing/printing of the operand,
1661 // as from a compiler's perspective, the whole tile is read/written.
1663 // * Accumulator matrix:
1665 // This is the entire matrix accumulator register ZA (<=> ZAB0), printed as
// Asm-operand class for a whole ZA tile (e.g. "za3.s"). Parsing, rendering
// and the diagnostic are shared with the other matrix operands; the
// predicate is specialized on MatrixKind::Tile, the element size and the
// concrete tile register class.
1672 class MatrixTileAsmOperand<string RC, int EltSize> : AsmOperandClass {
1673 let Name = "MatrixTile" # EltSize;
1674 let DiagnosticType = "Invalid" # Name;
1675 let ParserMethod = "tryParseMatrixRegister";
1676 let RenderMethod = "addMatrixOperands";
1677 let PredicateMethod = "isMatrixRegOperand<"
1678 # "MatrixKind::Tile" # ", "
1679 # EltSize # ", AArch64::" # RC # "RegClassID>";
// Register operand for a whole ZA tile. NumBitsForTile is the number of
// encoding bits needed to select a tile of this element size (passed to the
// templated decoder).
1682 class MatrixTileOperand<int EltSize, int NumBitsForTile, RegisterClass RC>
1683 : RegisterOperand<RC> {
1684 let ParserMatchClass = MatrixTileAsmOperand<!cast<string>(RC), EltSize>;
1685 let DecoderMethod = "DecodeMatrixTile<" # NumBitsForTile # ">";
1686 let PrintMethod = "printMatrixTile";
// Tile operands for the element sizes that appear directly in instruction
// encodings here: .h (1 bit), .s (2 bits), .d (3 bits).
1689 def TileOp16 : MatrixTileOperand<16, 1, MPR16>;
1690 def TileOp32 : MatrixTileOperand<32, 2, MPR32>;
1691 def TileOp64 : MatrixTileOperand<64, 3, MPR64>;
1694 // Tile vectors (horizontal and vertical)
// Asm-operand class for a single row/column vector of a tile (e.g.
// "za1h.h" or "za15v.q"). IsVertical selects column (V) vs. row (H)
// naming and the Col/Row matrix kind in the predicate.
1697 class MatrixTileVectorAsmOperand<string RC, int EltSize, int IsVertical>
1699 let Name = "MatrixTileVector" # !if(IsVertical, "V", "H") # EltSize;
1700 let DiagnosticType = "Invalid" # Name;
1701 let ParserMethod = "tryParseMatrixRegister";
1702 let RenderMethod = "addMatrixOperands";
1703 let PredicateMethod = "isMatrixRegOperand<"
1705 # !if(IsVertical, "Col", "Row") # ", "
1706 # EltSize # ", AArch64::" # RC # "RegClassID>";
// Register operand for a tile row/column vector; the printer is templated
// on the horizontal/vertical flag, the decoder on the tile-selector width.
1709 class MatrixTileVectorOperand<int EltSize, int NumBitsForTile,
1710 RegisterClass RC, int IsVertical>
1711 : RegisterOperand<RC> {
1712 let ParserMatchClass = MatrixTileVectorAsmOperand<!cast<string>(RC), EltSize,
1714 let DecoderMethod = "DecodeMatrixTile<" # NumBitsForTile # ">";
1715 let PrintMethod = "printMatrixTileVector<" # IsVertical # ">";
// Horizontal (row) tile-vector operands, .b through .q; the tile-selector
// width grows with element size (0..4 bits).
1718 def TileVectorOpH8 : MatrixTileVectorOperand< 8, 0, MPR8, 0>;
1719 def TileVectorOpH16 : MatrixTileVectorOperand< 16, 1, MPR16, 0>;
1720 def TileVectorOpH32 : MatrixTileVectorOperand< 32, 2, MPR32, 0>;
1721 def TileVectorOpH64 : MatrixTileVectorOperand< 64, 3, MPR64, 0>;
1722 def TileVectorOpH128 : MatrixTileVectorOperand<128, 4, MPR128, 0>;
// Vertical (column) counterparts.
1724 def TileVectorOpV8 : MatrixTileVectorOperand< 8, 0, MPR8, 1>;
1725 def TileVectorOpV16 : MatrixTileVectorOperand< 16, 1, MPR16, 1>;
1726 def TileVectorOpV32 : MatrixTileVectorOperand< 32, 2, MPR32, 1>;
1727 def TileVectorOpV64 : MatrixTileVectorOperand< 64, 3, MPR64, 1>;
1728 def TileVectorOpV128 : MatrixTileVectorOperand<128, 4, MPR128, 1>;
1731 // Accumulator matrix
// Asm-operand class for the whole accumulator matrix ZA. EltSize == 0
// means the plain "za" form with no element-type suffix (the !if drops the
// size from the operand name in that case).
1734 class MatrixAsmOperand<string RC, int EltSize> : AsmOperandClass {
1735 let Name = "Matrix" # !if(EltSize, !cast<string>(EltSize), "");
1736 let DiagnosticType = "Invalid" # Name;
1737 let ParserMethod = "tryParseMatrixRegister";
1738 let RenderMethod = "addMatrixOperands";
1739 let PredicateMethod = "isMatrixRegOperand<"
1740 # "MatrixKind::Array" # ", "
1741 # EltSize # ", AArch64::" # RC # "RegClassID>";
// Register operand for the accumulator matrix; printing is templated on
// the element size (0 = untyped "za").
1744 class MatrixOperand<RegisterClass RC, int EltSize> : RegisterOperand<RC> {
1745 let ParserMatchClass = MatrixAsmOperand<!cast<string>(RC), EltSize>;
1746 let PrintMethod = "printMatrix<" # EltSize # ">";
// Untyped accumulator-matrix operand ("za").
1749 def MatrixOp : MatrixOperand<MPR, 0>;
1750 // SME2 register operands and classes
// Element-typed accumulator-matrix operands (za.b/za.h/za.s/za.d).
1751 def MatrixOp8 : MatrixOperand<MPR, 8>;
1752 def MatrixOp16 : MatrixOperand<MPR, 16>;
1753 def MatrixOp32 : MatrixOperand<MPR, 32>;
1754 def MatrixOp64 : MatrixOperand<MPR, 64>;
// Operand for a list of ZA tiles (e.g. the {za0.d, za2.d} style list).
// Backed by an i8 immediate; encoding/decoding map between that mask and
// the tile-list syntax.
1756 class MatrixTileListAsmOperand : AsmOperandClass {
1757 let Name = "MatrixTileList";
1758 let ParserMethod = "tryParseMatrixTileList";
1759 let RenderMethod = "addMatrixTileListOperands";
1760 let PredicateMethod = "isMatrixTileList";
1763 class MatrixTileListOperand : Operand<i8> {
1764 let ParserMatchClass = MatrixTileListAsmOperand<>;
1765 let DecoderMethod = "DecodeMatrixTileListRegisterClass";
1766 let EncoderMethod = "EncodeMatrixTileListRegisterClass";
1767 let PrintMethod = "printMatrixTileList";
1770 def MatrixTileList : MatrixTileListOperand<>;
// Restricted GPR32 classes for the ZA row/slice index register: SME
// encodings only provide 2 bits for it, so only W8-W11 (or W12-W15 for
// SME2) are addressable. The encoder subtracts the base register.
1772 def MatrixIndexGPR32_8_11 : RegisterClass<"AArch64", [i32], 32, (sequence "W%u", 8, 11)> {
1773 let DiagnosticType = "InvalidMatrixIndexGPR32_8_11";
1775 def MatrixIndexGPR32_12_15 : RegisterClass<"AArch64", [i32], 32, (sequence "W%u", 12, 15)> {
1776 let DiagnosticType = "InvalidMatrixIndexGPR32_12_15";
1778 def MatrixIndexGPR32Op8_11 : RegisterOperand<MatrixIndexGPR32_8_11> {
1779 let EncoderMethod = "encodeMatrixIndexGPR32<AArch64::W8>";
1781 def MatrixIndexGPR32Op12_15 : RegisterOperand<MatrixIndexGPR32_12_15> {
1782 let EncoderMethod = "encodeMatrixIndexGPR32<AArch64::W12>";
// Asm operand for the SVCR pseudo-register names used by MSR (SMSTART/
// SMSTOP), parsed by name via tryParseSVCR.
1785 def SVCROperand : AsmOperandClass {
1787 let ParserMethod = "tryParseSVCR";
1788 let DiagnosticType = "Invalid" # Name;
// Immediate operand holding an SVCR encoding; validity is checked by a
// lookup into the generated SVCR table, both on the target-IR immediate
// (TImmLeaf) and on the MCOperand.
1791 def svcr_op : Operand<i32>, TImmLeaf<i32, [{
1792 return AArch64SVCR::lookupSVCRByEncoding(Imm) != nullptr;
1794 let ParserMatchClass = SVCROperand;
1795 let PrintMethod = "printSVCROp";
1796 let DecoderMethod = "DecodeSVCROp";
1797 let MCOperandPredicate = [{
1800 return AArch64SVCR::lookupSVCRByEncoding(MCOp.getImm()) != nullptr;
1804 //===----------------------------------------------------------------------===//
1805 // Register categories.
// Category of all general-purpose registers.
1808 def GeneralPurposeRegisters : RegisterCategory<[GPR64, GPR32]>;
// Registers with a fixed role (frame pointer, stack pointer, SVE vector
// granule, FFR), grouped into a class solely so they can be categorized
// together with the condition-code class below.
1810 def FIXED_REGS : RegisterClass<"AArch64", [i64], 64, (add FP, SP, VG, FFR)>;
1811 def FixedRegisters : RegisterCategory<[CCR, FIXED_REGS]>;