1 //===-- PPCRegisterInfo.td - The PowerPC Register File -----*- tablegen -*-===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
10 //===----------------------------------------------------------------------===//
// Sub-register indices used by the register definitions below. The first
// SubRegIndex parameter is the sub-register's size in bits; the optional
// second parameter is its bit offset within the covering register.
12 let Namespace = "PPC" in {
// The four 1-bit condition fields (LT/GT/EQ/UN) of a 4-bit CR field.
13 def sub_lt : SubRegIndex<1>;
14 def sub_gt : SubRegIndex<1, 1>;
15 def sub_eq : SubRegIndex<1, 2>;
16 def sub_un : SubRegIndex<1, 3>;
// 32-bit GPR inside a 64-bit GP8, and 64-bit half of a 128-bit register.
17 def sub_32 : SubRegIndex<32>;
18 def sub_64 : SubRegIndex<64>;
// The two 128-bit VSX registers of a 256-bit VSR pair.
19 def sub_vsx0 : SubRegIndex<128>;
20 def sub_vsx1 : SubRegIndex<128, 128>;
// The two 256-bit VSR pairs of a 512-bit accumulator (ACC/UACC).
21 def sub_pair0 : SubRegIndex<256>;
22 def sub_pair1 : SubRegIndex<256, 256>;
// The even and odd 64-bit halves of a consecutive GP8 pair.
23 def sub_gp8_x0 : SubRegIndex<64>;
24 def sub_gp8_x1 : SubRegIndex<64, 64>;
// PPCReg - Common base class for every PowerPC register definition in this
// file; it only pins the register namespace to "PPC".
28 class PPCReg<string n> : Register<n> {
29 let Namespace = "PPC";
32 // We identify all our registers with a 5-bit ID, for consistency's sake.
34 // GPR - One of the 32 32-bit general-purpose registers
35 class GPR<bits<5> num, string n> : PPCReg<n> {
// The hardware encoding is simply the 5-bit register number.
36 let HWEncoding{4-0} = num;
39 // GP8 - One of the 32 64-bit general-purpose registers
// A GP8 reuses the encoding of the 32-bit GPR that forms its low half,
// which it exposes through the sub_32 index.
40 class GP8<GPR SubReg, string n> : PPCReg<n> {
41 let HWEncoding = SubReg.HWEncoding;
42 let SubRegs = [SubReg];
43 let SubRegIndices = [sub_32];
46 // SPE - One of the 32 64-bit general-purpose registers (SPE)
// Structured exactly like GP8: same encoding as the underlying 32-bit GPR,
// exposed through sub_32.
47 class SPE<GPR SubReg, string n> : PPCReg<n> {
48 let HWEncoding = SubReg.HWEncoding;
49 let SubRegs = [SubReg];
50 let SubRegIndices = [sub_32];
53 // SPR - One of the 32-bit special-purpose registers
// SPR numbers are 10 bits wide, unlike the 5-bit GPR/FPR encodings.
54 class SPR<bits<10> num, string n> : PPCReg<n> {
55 let HWEncoding{9-0} = num;
58 // FPR - One of the 32 64-bit floating-point registers
// Encoded by its 5-bit register number, like a GPR.
59 class FPR<bits<5> num, string n> : PPCReg<n> {
60 let HWEncoding{4-0} = num;
63 // VF - One of the 32 64-bit floating-point subregisters of the vector
64 // registers (used by VSX).
65 class VF<bits<5> num, string n> : PPCReg<n> {
66 let HWEncoding{4-0} = num;
// Bit 5 is set to place these in the upper half of the VSX numbering
// (vs32-vs63), distinguishing them from the scalar FPRs.
67 let HWEncoding{5} = 1;
70 // VR - One of the 32 128-bit vector registers
// Takes the low 5 encoding bits from its 64-bit VF subregister; bit 5 is
// cleared so the plain Altivec encoding (0-31) is used.
71 class VR<VF SubReg, string n> : PPCReg<n> {
72 let HWEncoding{4-0} = SubReg.HWEncoding{4-0};
73 let HWEncoding{5} = 0;
74 let SubRegs = [SubReg];
75 let SubRegIndices = [sub_64];
78 // VSRL - One of the 32 128-bit VSX registers that overlap with the scalar
79 // floating-point registers.
// Shares its encoding with the FPR that occupies its 64-bit subregister.
80 class VSRL<FPR SubReg, string n> : PPCReg<n> {
81 let HWEncoding = SubReg.HWEncoding;
82 let SubRegs = [SubReg];
83 let SubRegIndices = [sub_64];
86 // VSXReg - One of the VSX registers in the range vs32-vs63 with numbering
87 // and encoding to match.
// The full 6-bit VSX number is stored directly (values 32-63 here).
88 class VSXReg<bits<6> num, string n> : PPCReg<n> {
89 let HWEncoding{5-0} = num;
92 // CR - One of the 8 4-bit condition registers
// subregs carries the four CRBIT fields (LT/GT/EQ/UN) of this CR field.
93 class CR<bits<3> num, string n, list<Register> subregs> : PPCReg<n> {
94 let HWEncoding{2-0} = num;
95 let SubRegs = subregs;
98 // CRBIT - One of the 32 1-bit condition register fields
// Encoded as its absolute bit position within the full 32-bit CR (0-31).
99 class CRBIT<bits<5> num, string n> : PPCReg<n> {
100 let HWEncoding{4-0} = num;
103 // ACC - One of the 8 512-bit VSX accumulators.
// subregs carries the two 256-bit VSR pairs that make up the accumulator.
104 class ACC<bits<3> num, string n, list<Register> subregs> : PPCReg<n> {
105 let HWEncoding{2-0} = num;
106 let SubRegs = subregs;
109 // UACC - One of the 8 512-bit VSX accumulators prior to being primed.
110 // Without using this register class, the register allocator has no way to
111 // differentiate a primed accumulator from an unprimed accumulator.
112 // This may result in invalid copies between primed and unprimed accumulators.
// Same layout as ACC: a 3-bit number plus two VSR-pair subregisters.
113 class UACC<bits<3> num, string n, list<Register> subregs> : PPCReg<n> {
114 let HWEncoding{2-0} = num;
115 let SubRegs = subregs;
118 // VSR Pairs - One of the 32 paired even-odd consecutive VSRs.
// num is the pair index (0-31); subregs holds the even and odd VSRs.
119 class VSRPair<bits<5> num, string n, list<Register> subregs> : PPCReg<n> {
120 let HWEncoding{4-0} = num;
121 let SubRegs = subregs;
124 // GP8Pair - Consecutive even-odd paired GP8.
// EvenIndex is the even register number; the pair is (X<n>, X<n+1>).
// The static assert rejects odd indices at TableGen time.
125 class GP8Pair<string n, bits<5> EvenIndex> : PPCReg<n> {
126 assert !eq(EvenIndex{0}, 0), "Index should be even.";
127 let HWEncoding{4-0} = EvenIndex;
128 let SubRegs = [!cast<GP8>("X"#EvenIndex), !cast<GP8>("X"#!add(EvenIndex, 1))];
// NOTE(review): -1 appears to mean "no DWARF number assigned" for these
// pseudo pairs — confirm against Target.td's DwarfRegNum semantics.
129 let DwarfNumbers = [-1, -1];
130 let SubRegIndices = [sub_gp8_x0, sub_gp8_x1];
133 // General-purpose registers
// Each DwarfRegNum list has one entry per DWARF flavour (64-bit, 32-bit
// here, judging by the X/R defs below) — TODO confirm the exact meaning of
// the negative sentinel values against Target.td.
134 foreach Index = 0-31 in {
135 def R#Index : GPR<Index, "r"#Index>, DwarfRegNum<[-2, Index]>;
138 // 64-bit General-purpose registers
// X<n> prints as "r<n>" and contains R<n> as its sub_32 subregister.
139 foreach Index = 0-31 in {
140 def X#Index : GP8<!cast<GPR>("R"#Index), "r"#Index>,
141 DwarfRegNum<[Index, -2]>;
// SPE registers S0-S31 overlay R0-R31; DWARF numbers start at 1200.
145 foreach Index = 0-31 in {
146 def S#Index : SPE<!cast<GPR>("R"#Index), "r"#Index>,
147 DwarfRegNum<[!add(Index, 1200), !add(Index, 1200)]>;
150 // Floating-point registers
// FPRs use DWARF numbers 32-63 in both flavours.
151 foreach Index = 0-31 in {
152 def F#Index : FPR<Index, "f"#Index>,
153 DwarfRegNum<[!add(Index, 32), !add(Index, 32)]>;
156 // 64-bit Floating-point subregisters of Altivec registers
157 // Note: the register names are v0-v31 or vs32-vs63 depending on the use.
158 // Custom C++ code is used to produce the correct name and encoding.
// VF DWARF numbers start at 77, matching the V registers they underlie.
159 foreach Index = 0-31 in {
160 def VF#Index : VF<Index, "v" #Index>,
161 DwarfRegNum<[!add(Index, 77), !add(Index, 77)]>;
// 128-bit vector registers V0-V31, each containing VF<n> as its 64-bit
// subregister; same DWARF numbers (77+) as the VF defs above.
165 foreach Index = 0-31 in {
166 def V#Index : VR<!cast<VF>("VF"#Index), "v"#Index>,
167 DwarfRegNum<[!add(Index, 77), !add(Index, 77)]>;
// VSL0-VSL31: the low VSX registers, aliasing F0-F31 for DWARF purposes.
171 foreach Index = 0-31 in {
172 def VSL#Index : VSRL<!cast<FPR>("F"#Index), "vs"#Index>,
173 DwarfRegAlias<!cast<FPR>("F"#Index)>;
176 // Dummy VSX registers, this defines string: "vs32"-"vs63", and is only used for
178 foreach Index = 32-63 in {
179 def VSX#Index : VSXReg<Index, "vs"#Index>;
// VSR pairs: VSRp0-VSRp31. The foreach Index iterates over the even VSR
// numbers; !srl(Index, 1) converts them to the pair index.
182 let SubRegIndices = [sub_vsx0, sub_vsx1] in {
183 // VSR pairs 0 - 15 (corresponding to VSRs 0 - 30 paired with 1 - 31).
184 foreach Index = { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30 } in {
185 def VSRp#!srl(Index, 1) : VSRPair<!srl(Index, 1), "vsp"#Index,
186 [!cast<VSRL>("VSL"#Index), !cast<VSRL>("VSL"#!add(Index, 1))]>,
187 DwarfRegNum<[-1, -1]>;
190 // VSR pairs 16 - 31 (corresponding to VSRs 32 - 62 paired with 33 - 63).
// The upper pairs are built from the V registers (Altivec half of VSX).
191 foreach Index = { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30 } in {
192 def VSRp#!add(!srl(Index, 1), 16) :
193 VSRPair<!add(!srl(Index, 1), 16), "vsp"#!add(Index, 32),
194 [!cast<VR>("V"#Index), !cast<VR>("V"#!add(Index, 1))]>,
195 DwarfRegNum<[-1, -1]>;
199 // 16 paired even-odd consecutive GP8s.
// G8p<n> covers (X<2n>, X<2n+1>) and prints as "r<2n>".
200 foreach Index = { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30 } in {
201 def G8p#!srl(Index, 1) : GP8Pair<"r"#Index, Index>;
204 // The representation of r0 when treated as the constant 0.
205 def ZERO : GPR<0, "0">, DwarfRegAlias<R0>;
206 def ZERO8 : GP8<ZERO, "0">, DwarfRegAlias<X0>;
208 // Representations of the frame pointer used by ISD::FRAMEADDR.
// FP/BP are pseudo registers (note the non-assembler names); their GPR
// encoding of 0 is arbitrary, as the comments below state.
209 def FP : GPR<0 /* arbitrary */, "**FRAME POINTER**">;
210 def FP8 : GP8<FP, "**FRAME POINTER**">;
212 // Representations of the base pointer used by setjmp.
213 def BP : GPR<0 /* arbitrary */, "**BASE POINTER**">;
214 def BP8 : GP8<BP, "**BASE POINTER**">;
216 // Condition register bits
// CR<k>{LT,GT,EQ,UN} occupy bit positions 4*k .. 4*k+3 of the CR; the
// encoding and the printed name are both the absolute bit number.
217 def CR0LT : CRBIT< 0, "0">;
218 def CR0GT : CRBIT< 1, "1">;
219 def CR0EQ : CRBIT< 2, "2">;
220 def CR0UN : CRBIT< 3, "3">;
221 def CR1LT : CRBIT< 4, "4">;
222 def CR1GT : CRBIT< 5, "5">;
223 def CR1EQ : CRBIT< 6, "6">;
224 def CR1UN : CRBIT< 7, "7">;
225 def CR2LT : CRBIT< 8, "8">;
226 def CR2GT : CRBIT< 9, "9">;
227 def CR2EQ : CRBIT<10, "10">;
228 def CR2UN : CRBIT<11, "11">;
229 def CR3LT : CRBIT<12, "12">;
230 def CR3GT : CRBIT<13, "13">;
231 def CR3EQ : CRBIT<14, "14">;
232 def CR3UN : CRBIT<15, "15">;
233 def CR4LT : CRBIT<16, "16">;
234 def CR4GT : CRBIT<17, "17">;
235 def CR4EQ : CRBIT<18, "18">;
236 def CR4UN : CRBIT<19, "19">;
237 def CR5LT : CRBIT<20, "20">;
238 def CR5GT : CRBIT<21, "21">;
239 def CR5EQ : CRBIT<22, "22">;
240 def CR5UN : CRBIT<23, "23">;
241 def CR6LT : CRBIT<24, "24">;
242 def CR6GT : CRBIT<25, "25">;
243 def CR6EQ : CRBIT<26, "26">;
244 def CR6UN : CRBIT<27, "27">;
245 def CR7LT : CRBIT<28, "28">;
246 def CR7GT : CRBIT<29, "29">;
247 def CR7EQ : CRBIT<30, "30">;
248 def CR7UN : CRBIT<31, "31">;
250 // Condition registers
// Each 4-bit CR field aggregates its LT/GT/EQ/UN bits via the sub_lt..sub_un
// indices; DWARF numbers 68-75 in both flavours.
251 let SubRegIndices = [sub_lt, sub_gt, sub_eq, sub_un] in {
252 def CR0 : CR<0, "cr0", [CR0LT, CR0GT, CR0EQ, CR0UN]>, DwarfRegNum<[68, 68]>;
253 def CR1 : CR<1, "cr1", [CR1LT, CR1GT, CR1EQ, CR1UN]>, DwarfRegNum<[69, 69]>;
254 def CR2 : CR<2, "cr2", [CR2LT, CR2GT, CR2EQ, CR2UN]>, DwarfRegNum<[70, 70]>;
255 def CR3 : CR<3, "cr3", [CR3LT, CR3GT, CR3EQ, CR3UN]>, DwarfRegNum<[71, 71]>;
256 def CR4 : CR<4, "cr4", [CR4LT, CR4GT, CR4EQ, CR4UN]>, DwarfRegNum<[72, 72]>;
257 def CR5 : CR<5, "cr5", [CR5LT, CR5GT, CR5EQ, CR5UN]>, DwarfRegNum<[73, 73]>;
258 def CR6 : CR<6, "cr6", [CR6LT, CR6GT, CR6EQ, CR6UN]>, DwarfRegNum<[74, 74]>;
259 def CR7 : CR<7, "cr7", [CR7LT, CR7GT, CR7EQ, CR7UN]>, DwarfRegNum<[75, 75]>;
// Link register: SPR 8. Separate 32-bit (LR) and 64-bit (LR8) defs exist,
// distinguished only by which DWARF flavour carries the number 65.
263 def LR : SPR<8, "lr">, DwarfRegNum<[-2, 65]>;
264 //let Aliases = [LR] in
265 def LR8 : SPR<8, "lr">, DwarfRegNum<[65, -2]>;
// Count register: SPR 9, same 32/64-bit split as LR.
268 def CTR : SPR<9, "ctr">, DwarfRegNum<[-2, 66]>;
269 def CTR8 : SPR<9, "ctr">, DwarfRegNum<[66, -2]>;
// VRSAVE: SPR 256, tracks live vector registers.
272 def VRSAVE: SPR<256, "vrsave">, DwarfRegNum<[109]>;
274 // SPE extra registers
275 // SPE Accumulator for multiply-accumulate SPE operations. Never directly
276 // accessed, so there's no real encoding for it.
277 def SPEACC: DwarfRegNum<[99, 111]>;
278 def SPEFSCR: SPR<512, "spefscr">, DwarfRegNum<[612, 112]>;
// Fixed-point exception register: SPR 1.
280 def XER: SPR<1, "xer">, DwarfRegNum<[76]>;
282 // Carry bit. In the architecture this is really bit 0 of the XER register
283 // (which really is SPR register 1); this is the only bit interesting to a
285 def CARRY: SPR<1, "xer">, DwarfRegNum<[76]> {
289 // FP rounding mode: bits 30 and 31 of the FP status and control register
290 // This is not allocated as a normal register; it appears only in
291 // Uses and Defs. The ABI says it needs to be preserved by a function,
292 // but this is not achieved by saving and restoring it as with
293 // most registers, it has to be done in code; to make this work all the
294 // return and call instructions are described as Uses of RM, so instructions
295 // that do nothing but change RM will not get deleted.
// RM is a pure pseudo register — note the non-assembler name.
296 def RM: PPCReg<"**ROUNDING MODE**">;
299 // Allocate volatiles first
300 // then nonvolatiles in reverse order since stmw/lmw save from rN to r31
// GPRC: the 32-bit GPR class (i32/f32), including the FP/BP pseudos.
301 def GPRC : RegisterClass<"PPC", [i32,f32], 32, (add (sequence "R%u", 2, 12),
302 (sequence "R%u", 30, 13),
303 R31, R0, R1, FP, BP)> {
304 // On non-Darwin PPC64 systems, R2 can be allocated, but must be restored, so
305 // put it at the end of the list.
306 // On AIX, CSRs are allocated starting from R31 according to:
307 // https://www.ibm.com/docs/en/ssw_aix_72/assembler/assembler_pdf.pdf.
308 // This also helps setting the correct `NumOfGPRsSaved' in traceback table.
// AltOrders[0]: default order with R2 moved last; AltOrders[1]: AIX-style
// order allocating CSRs downward from R31. The subtarget picks the index.
309 let AltOrders = [(add (sub GPRC, R2), R2),
310 (add (sequence "R%u", 2, 12),
311 (sequence "R%u", 31, 13), R0, R1, FP, BP)];
312 let AltOrderSelect = [{
313 return MF.getSubtarget<PPCSubtarget>().getGPRAllocationOrderIdx();
// G8RC: the 64-bit GPR class (i64); mirrors GPRC's allocation strategy.
317 def G8RC : RegisterClass<"PPC", [i64], 64, (add (sequence "X%u", 2, 12),
318 (sequence "X%u", 30, 14),
319 X31, X13, X0, X1, FP8, BP8)> {
320 // On non-Darwin PPC64 systems, R2 can be allocated, but must be restored, so
321 // put it at the end of the list.
// Same two alternate orders as GPRC, selected by the same subtarget hook.
322 let AltOrders = [(add (sub G8RC, X2), X2),
323 (add (sequence "X%u", 2, 12),
324 (sequence "X%u", 31, 13), X0, X1, FP8, BP8)];
325 let AltOrderSelect = [{
326 return MF.getSubtarget<PPCSubtarget>().getGPRAllocationOrderIdx();
330 // For some instructions r0 is special (representing the value 0 instead of
331 // the value in the r0 register), and we use these register subclasses to
332 // prevent r0 from being allocated for use by those instructions.
// GPRC_NOR0: GPRC with R0 replaced by the ZERO pseudo (constant 0).
333 def GPRC_NOR0 : RegisterClass<"PPC", [i32,f32], 32, (add (sub GPRC, R0), ZERO)> {
334 // On non-Darwin PPC64 systems, R2 can be allocated, but must be restored, so
335 // put it at the end of the list.
336 let AltOrders = [(add (sub GPRC_NOR0, R2), R2),
337 (add (sequence "R%u", 2, 12),
338 (sequence "R%u", 31, 13), R1, FP, BP, ZERO)];
339 let AltOrderSelect = [{
340 return MF.getSubtarget<PPCSubtarget>().getGPRAllocationOrderIdx();
// G8RC_NOX0: 64-bit analogue of GPRC_NOR0 — X0 replaced by ZERO8.
344 def G8RC_NOX0 : RegisterClass<"PPC", [i64], 64, (add (sub G8RC, X0), ZERO8)> {
345 // On non-Darwin PPC64 systems, R2 can be allocated, but must be restored, so
346 // put it at the end of the list.
347 let AltOrders = [(add (sub G8RC_NOX0, X2), X2),
348 (add (sequence "X%u", 2, 12),
349 (sequence "X%u", 31, 13), X1, FP8, BP8, ZERO8)];
350 let AltOrderSelect = [{
351 return MF.getSubtarget<PPCSubtarget>().getGPRAllocationOrderIdx();
// SPERC: 64-bit SPE GPRs as f64; volatiles first, then non-volatiles in
// reverse order, mirroring GPRC's ordering.
355 def SPERC : RegisterClass<"PPC", [f64], 64, (add (sequence "S%u", 2, 12),
356 (sequence "S%u", 30, 13),
359 // Allocate volatiles first, then non-volatiles in reverse order. With the SVR4
360 // ABI the size of the Floating-point register save area is determined by the
361 // allocated non-volatile register with the lowest register number, as FP
362 // register N is spilled to offset 8 * (32 - N) below the back chain word of the
363 // previous stack frame. By allocating non-volatiles in reverse order we make
364 // sure that the Floating-point register save area is always as small as
365 // possible because there aren't any unused spill slots.
366 def F8RC : RegisterClass<"PPC", [f64], 64, (add (sequence "F%u", 0, 13),
367 (sequence "F%u", 31, 14))>;
// F4RC: the same physical registers viewed as single-precision values.
368 def F4RC : RegisterClass<"PPC", [f32], 32, (add F8RC)>;
// VRRC: the 128-bit Altivec vector class. Volatiles (V2-V19 region) come
// first; non-volatiles V31..V20 are allocated in reverse order.
370 def VRRC : RegisterClass<"PPC",
371 [v16i8,v8i16,v4i32,v2i64,v1i128,v4f32,v2f64, f128],
373 (add V2, V3, V4, V5, V0, V1, V6, V7, V8, V9, V10, V11,
374 V12, V13, V14, V15, V16, V17, V18, V19, V31, V30,
375 V29, V28, V27, V26, V25, V24, V23, V22, V21, V20)>;
377 // VSX register classes (the allocation order mirrors that of the corresponding
378 // subregister classes).
// VSLRC: the lower 32 VSX registers (those overlapping the FPRs).
379 def VSLRC : RegisterClass<"PPC", [v4i32,v4f32,v2f64,v2i64], 128,
380 (add (sequence "VSL%u", 0, 13),
381 (sequence "VSL%u", 31, 14))>;
// VSRC: the full VSX class (definition continues beyond this line).
382 def VSRC : RegisterClass<"PPC", [v4i32,v4f32,v2f64,v2i64], 128,
385 // Register classes for the 64-bit "scalar" VSX subregisters.
// VFRC: the 64-bit FP subregisters of the vector registers, volatiles first.
386 def VFRC : RegisterClass<"PPC", [f64], 64,
387 (add VF2, VF3, VF4, VF5, VF0, VF1, VF6, VF7,
388 VF8, VF9, VF10, VF11, VF12, VF13, VF14,
389 VF15, VF16, VF17, VF18, VF19, VF31, VF30,
390 VF29, VF28, VF27, VF26, VF25, VF24, VF23,
// VSFRC: all 64 VSX registers usable as f64 scalars (FPRs plus VFs).
392 def VSFRC : RegisterClass<"PPC", [f64], 64, (add F8RC, VFRC)>;
394 // Allow spilling GPR's into caller-saved VSR's.
// Non-volatile VFs and FPRs are excluded so spills only hit caller-saved VSRs.
395 def SPILLTOVSRRC : RegisterClass<"PPC", [i64, f64], 64, (add G8RC, (sub VSFRC,
396 (sequence "VF%u", 31, 20),
397 (sequence "F%u", 31, 14)))>;
399 // Register class for single precision scalars in VSX registers
400 def VSSRC : RegisterClass<"PPC", [f32], 32, (add VSFRC)>;
// CRBITRC: individual condition bits as i1 values.
402 def CRBITRC : RegisterClass<"PPC", [i1], 32,
403 (add CR2LT, CR2GT, CR2EQ, CR2UN,
404 CR3LT, CR3GT, CR3EQ, CR3UN,
405 CR4LT, CR4GT, CR4EQ, CR4UN,
406 CR5LT, CR5GT, CR5EQ, CR5UN,
407 CR6LT, CR6GT, CR6EQ, CR6UN,
408 CR7LT, CR7GT, CR7EQ, CR7UN,
409 CR1LT, CR1GT, CR1EQ, CR1UN,
410 CR0LT, CR0GT, CR0EQ, CR0UN)> {
// The alternate order drops the CR2-CR4 bits; it is selected on ELFv2 when
// use of the non-volatile CR fields is disabled for this function.
412 let AltOrders = [(sub CRBITRC, CR2LT, CR2GT, CR2EQ, CR2UN, CR3LT, CR3GT,
413 CR3EQ, CR3UN, CR4LT, CR4GT, CR4EQ, CR4UN)];
414 let AltOrderSelect = [{
415 return MF.getSubtarget<PPCSubtarget>().isELFv2ABI() &&
416 MF.getInfo<PPCFunctionInfo>()->isNonVolatileCRDisabled();
// CRRC: whole 4-bit CR fields as i32 values.
420 def CRRC : RegisterClass<"PPC", [i32], 32,
421 (add CR0, CR1, CR5, CR6,
422 CR7, CR2, CR3, CR4)> {
// As with CRBITRC, the alternate order excludes CR2-CR4 on ELFv2 when
// non-volatile CR use is disabled.
423 let AltOrders = [(sub CRRC, CR2, CR3, CR4)];
424 let AltOrderSelect = [{
425 return MF.getSubtarget<PPCSubtarget>().isELFv2ABI() &&
426 MF.getInfo<PPCFunctionInfo>()->isNonVolatileCRDisabled();
429 // The CTR registers are not allocatable because they're used by the
430 // decrement-and-branch instructions, and thus need to stay live across
431 // multiple basic blocks.
432 def CTRRC : RegisterClass<"PPC", [i32], 32, (add CTR)> {
433 let isAllocatable = 0;
435 def CTRRC8 : RegisterClass<"PPC", [i64], 64, (add CTR8)> {
436 let isAllocatable = 0;
// The link register classes are likewise never allocated directly.
439 def LRRC : RegisterClass<"PPC", [i32], 32, (add LR)> {
440 let isAllocatable = 0;
442 def LR8RC : RegisterClass<"PPC", [i64], 64, (add LR8)> {
443 let isAllocatable = 0;
446 def VRSAVERC : RegisterClass<"PPC", [i32], 32, (add VRSAVE)>;
// CARRYRC groups the carry bit with its containing XER register.
447 def CARRYRC : RegisterClass<"PPC", [i32], 32, (add CARRY, XER)> {
// Primed accumulators ACC0-ACC7, each covering two consecutive VSR pairs
// (i.e. VSRs 4n .. 4n+3). No DWARF numbers are assigned.
451 let SubRegIndices = [sub_pair0, sub_pair1] in {
452 def ACC0 : ACC<0, "acc0", [VSRp0, VSRp1]>, DwarfRegNum<[-1, -1]>;
453 def ACC1 : ACC<1, "acc1", [VSRp2, VSRp3]>, DwarfRegNum<[-1, -1]>;
454 def ACC2 : ACC<2, "acc2", [VSRp4, VSRp5]>, DwarfRegNum<[-1, -1]>;
455 def ACC3 : ACC<3, "acc3", [VSRp6, VSRp7]>, DwarfRegNum<[-1, -1]>;
456 def ACC4 : ACC<4, "acc4", [VSRp8, VSRp9]>, DwarfRegNum<[-1, -1]>;
457 def ACC5 : ACC<5, "acc5", [VSRp10, VSRp11]>, DwarfRegNum<[-1, -1]>;
458 def ACC6 : ACC<6, "acc6", [VSRp12, VSRp13]>, DwarfRegNum<[-1, -1]>;
459 def ACC7 : ACC<7, "acc7", [VSRp14, VSRp15]>, DwarfRegNum<[-1, -1]>;
461 def ACCRC : RegisterClass<"PPC", [v512i1], 128, (add ACC0, ACC1, ACC2, ACC3,
462 ACC4, ACC5, ACC6, ACC7)> {
463 // The AllocationPriority is in the range [0, 63]. Assigned the ACC registers
464 // the highest possible priority in this range to force the register allocator
465 // to assign these registers first. This is done because the ACC registers
466 // must represent 4 adjacent vector registers. For example ACC1 must be
467 // VS4 - VS7. The value here must be at least 32 as we want to allocate
468 // these registers even before we allocate global ranges.
469 let AllocationPriority = 63;
// Unprimed accumulators UACC0-UACC7: same physical layout (and printed
// names) as the ACCs, kept distinct so the allocator can tell primed from
// unprimed state (see the UACC class comment above).
473 let SubRegIndices = [sub_pair0, sub_pair1] in {
474 def UACC0 : UACC<0, "acc0", [VSRp0, VSRp1]>, DwarfRegNum<[-1, -1]>;
475 def UACC1 : UACC<1, "acc1", [VSRp2, VSRp3]>, DwarfRegNum<[-1, -1]>;
476 def UACC2 : UACC<2, "acc2", [VSRp4, VSRp5]>, DwarfRegNum<[-1, -1]>;
477 def UACC3 : UACC<3, "acc3", [VSRp6, VSRp7]>, DwarfRegNum<[-1, -1]>;
478 def UACC4 : UACC<4, "acc4", [VSRp8, VSRp9]>, DwarfRegNum<[-1, -1]>;
479 def UACC5 : UACC<5, "acc5", [VSRp10, VSRp11]>, DwarfRegNum<[-1, -1]>;
480 def UACC6 : UACC<6, "acc6", [VSRp12, VSRp13]>, DwarfRegNum<[-1, -1]>;
481 def UACC7 : UACC<7, "acc7", [VSRp14, VSRp15]>, DwarfRegNum<[-1, -1]>;
483 def UACCRC : RegisterClass<"PPC", [v512i1], 128,
484 (add UACC0, UACC1, UACC2, UACC3,
485 UACC4, UACC5, UACC6, UACC7)> {
486 // The AllocationPriority for the UACC registers is still high and must be at
487 // least 32 as we want to allocate these registers before we allocate other
488 // global ranges. The value must be less than the AllocationPriority of the
490 let AllocationPriority = 36;
494 // FIXME: This allocation order may increase stack frame size when allocating
495 // non-volatile registers.
497 // Placing Altivec registers first and allocate the rest as underlying VSX
498 // ones, to reduce interference with accumulator registers (lower 32 VSRs).
499 // This reduces copies when loading for accumulators, which is common use for
500 // paired VSX registers.
// The 256-bit VSR-pair class (the "def" line itself is elided from this
// excerpt): upper (Altivec-backed) pairs first, then the lower pairs.
502 RegisterClass<"PPC", [v256i1], 128,
503 (add VSRp17, VSRp18, VSRp16, VSRp19, VSRp20, VSRp21,
504 VSRp22, VSRp23, VSRp24, VSRp25, VSRp31, VSRp30,
505 VSRp29, VSRp28, VSRp27, VSRp26,
506 (sequence "VSRp%u", 0, 6),
507 (sequence "VSRp%u", 15, 7))> {
508 // Give the VSRp registers a non-zero AllocationPriority. The value is less
509 // than 32 as these registers should not always be allocated before global
510 // ranges and the value should be less than the AllocationPriority - 32 for
511 // the UACC registers. Even global VSRp registers should be allocated after
512 // the UACC registers have been chosen.
513 let AllocationPriority = 2;
517 // Make AllocationOrder as similar as G8RC's to avoid potential spilling.
518 // Similarly, we have an AltOrder for the 64-bit ELF ABI in which r2 is allocated
521 RegisterClass<"PPC", [i128], 128,
522 (add (sequence "G8p%u", 1, 5),
523 (sequence "G8p%u", 14, 7),
524 G8p15, G8p6, G8p0)> {
525 let AltOrders = [(add (sub G8pRC, G8p1), G8p1)];
526 let AltOrderSelect = [{
527 return MF.getSubtarget<PPCSubtarget>().is64BitELFABI();