/* arch/powerpc/include/asm/ppc_asm.h */
/*
 * Copyright (C) 1995-1999 Gary Thomas, Paul Mackerras, Cort Dougan.
 */
#ifndef _ASM_POWERPC_PPC_ASM_H
#define _ASM_POWERPC_PPC_ASM_H

#include <linux/init.h>
#include <linux/stringify.h>
#include <asm/asm-compat.h>
#include <asm/processor.h>
#include <asm/ppc-opcode.h>
#include <asm/firmware.h>

#ifndef __ASSEMBLY__
#error __FILE__ should only be used in assembler files
#else

#define SZL			(BITS_PER_LONG/8)

/*
 * Stuff for accurate CPU time accounting.
 * These macros handle transitions between user and system state
 * in exception entry and exit and accumulate time to the
 * user_time and system_time fields in the paca.
 */

#ifndef CONFIG_VIRT_CPU_ACCOUNTING
#define ACCOUNT_CPU_USER_ENTRY(ra, rb)
#define ACCOUNT_CPU_USER_EXIT(ra, rb)
#define ACCOUNT_STOLEN_TIME
#else
#define ACCOUNT_CPU_USER_ENTRY(ra, rb)					\
	beq	2f;			/* if from kernel mode */	\
	MFTB(ra);			/* get timebase */		\
	ld	rb,PACA_STARTTIME_USER(r13);				\
	std	ra,PACA_STARTTIME(r13);					\
	subf	rb,rb,ra;		/* subtract start value */	\
	ld	ra,PACA_USER_TIME(r13);					\
	add	ra,ra,rb;		/* add on to user time */	\
	std	ra,PACA_USER_TIME(r13);					\
2:

#define ACCOUNT_CPU_USER_EXIT(ra, rb)					\
	MFTB(ra);			/* get timebase */		\
	ld	rb,PACA_STARTTIME(r13);					\
	std	ra,PACA_STARTTIME_USER(r13);				\
	subf	rb,rb,ra;		/* subtract start value */	\
	ld	ra,PACA_SYSTEM_TIME(r13);				\
	add	ra,ra,rb;		/* add on to system time */	\
	std	ra,PACA_SYSTEM_TIME(r13)
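
/*
 * Illustrative note (added, not part of the original header): these two
 * macros are invoked from exception entry/exit paths, e.g. in entry_64.S.
 * ACCOUNT_CPU_USER_ENTRY assumes a preceding test such as
 * "andi. rX,rY,MSR_PR" has set cr0, so that its "beq 2f" skips the
 * user-time accounting when the exception came from kernel mode.
 * A sketch (register choices hypothetical):
 *
 *	andi.	r10,r12,MSR_PR		# r12 holds the saved MSR
 *	ACCOUNT_CPU_USER_ENTRY(r10, r11)
 */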

#ifdef CONFIG_PPC_SPLPAR
#define ACCOUNT_STOLEN_TIME						\
BEGIN_FW_FTR_SECTION;							\
	beq	33f;							\
	/* from user - see if there are any DTL entries to process */	\
	ld	r10,PACALPPACAPTR(r13);	/* get ptr to VPA */		\
	ld	r11,PACA_DTL_RIDX(r13);	/* get log read index */	\
	ld	r10,LPPACA_DTLIDX(r10);	/* get log write index */	\
	cmpd	cr1,r11,r10;						\
	beq+	cr1,33f;						\
	bl	.accumulate_stolen_time;				\
33:									\
END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)

#else	/* CONFIG_PPC_SPLPAR */
#define ACCOUNT_STOLEN_TIME

#endif /* CONFIG_PPC_SPLPAR */

#endif /* CONFIG_VIRT_CPU_ACCOUNTING */

/*
 * Macros for storing registers into and loading registers from
 * exception frames.
 */
#ifdef __powerpc64__
#define SAVE_GPR(n, base)	std	n,GPR0+8*(n)(base)
#define REST_GPR(n, base)	ld	n,GPR0+8*(n)(base)
#define SAVE_NVGPRS(base)	SAVE_8GPRS(14, base); SAVE_10GPRS(22, base)
#define REST_NVGPRS(base)	REST_8GPRS(14, base); REST_10GPRS(22, base)
#else
#define SAVE_GPR(n, base)	stw	n,GPR0+4*(n)(base)
#define REST_GPR(n, base)	lwz	n,GPR0+4*(n)(base)
#define SAVE_NVGPRS(base)	SAVE_GPR(13, base); SAVE_8GPRS(14, base); \
				SAVE_10GPRS(22, base)
#define REST_NVGPRS(base)	REST_GPR(13, base); REST_8GPRS(14, base); \
				REST_10GPRS(22, base)
#endif

#define SAVE_2GPRS(n, base)	SAVE_GPR(n, base); SAVE_GPR(n+1, base)
#define SAVE_4GPRS(n, base)	SAVE_2GPRS(n, base); SAVE_2GPRS(n+2, base)
#define SAVE_8GPRS(n, base)	SAVE_4GPRS(n, base); SAVE_4GPRS(n+4, base)
#define SAVE_10GPRS(n, base)	SAVE_8GPRS(n, base); SAVE_2GPRS(n+8, base)
#define REST_2GPRS(n, base)	REST_GPR(n, base); REST_GPR(n+1, base)
#define REST_4GPRS(n, base)	REST_2GPRS(n, base); REST_2GPRS(n+2, base)
#define REST_8GPRS(n, base)	REST_4GPRS(n, base); REST_4GPRS(n+4, base)
#define REST_10GPRS(n, base)	REST_8GPRS(n, base); REST_2GPRS(n+8, base)
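
/*
 * Worked example (added for illustration): on 64-bit,
 * SAVE_2GPRS(30, r1) expands recursively to
 *	std	r30,GPR0+8*30(r1)
 *	std	r31,GPR0+8*31(r1)
 * and SAVE_NVGPRS(r1) saves the non-volatile range r14-r31 the same way.
 */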

#define SAVE_FPR(n, base)	stfd	n,THREAD_FPR0+8*TS_FPRWIDTH*(n)(base)
#define SAVE_2FPRS(n, base)	SAVE_FPR(n, base); SAVE_FPR(n+1, base)
#define SAVE_4FPRS(n, base)	SAVE_2FPRS(n, base); SAVE_2FPRS(n+2, base)
#define SAVE_8FPRS(n, base)	SAVE_4FPRS(n, base); SAVE_4FPRS(n+4, base)
#define SAVE_16FPRS(n, base)	SAVE_8FPRS(n, base); SAVE_8FPRS(n+8, base)
#define SAVE_32FPRS(n, base)	SAVE_16FPRS(n, base); SAVE_16FPRS(n+16, base)
#define REST_FPR(n, base)	lfd	n,THREAD_FPR0+8*TS_FPRWIDTH*(n)(base)
#define REST_2FPRS(n, base)	REST_FPR(n, base); REST_FPR(n+1, base)
#define REST_4FPRS(n, base)	REST_2FPRS(n, base); REST_2FPRS(n+2, base)
#define REST_8FPRS(n, base)	REST_4FPRS(n, base); REST_4FPRS(n+4, base)
#define REST_16FPRS(n, base)	REST_8FPRS(n, base); REST_8FPRS(n+8, base)
#define REST_32FPRS(n, base)	REST_16FPRS(n, base); REST_16FPRS(n+16, base)

#define SAVE_VR(n,b,base)	li b,THREAD_VR0+(16*(n)); stvx n,base,b
#define SAVE_2VRS(n,b,base)	SAVE_VR(n,b,base); SAVE_VR(n+1,b,base)
#define SAVE_4VRS(n,b,base)	SAVE_2VRS(n,b,base); SAVE_2VRS(n+2,b,base)
#define SAVE_8VRS(n,b,base)	SAVE_4VRS(n,b,base); SAVE_4VRS(n+4,b,base)
#define SAVE_16VRS(n,b,base)	SAVE_8VRS(n,b,base); SAVE_8VRS(n+8,b,base)
#define SAVE_32VRS(n,b,base)	SAVE_16VRS(n,b,base); SAVE_16VRS(n+16,b,base)
#define REST_VR(n,b,base)	li b,THREAD_VR0+(16*(n)); lvx n,base,b
#define REST_2VRS(n,b,base)	REST_VR(n,b,base); REST_VR(n+1,b,base)
#define REST_4VRS(n,b,base)	REST_2VRS(n,b,base); REST_2VRS(n+2,b,base)
#define REST_8VRS(n,b,base)	REST_4VRS(n,b,base); REST_4VRS(n+4,b,base)
#define REST_16VRS(n,b,base)	REST_8VRS(n,b,base); REST_8VRS(n+8,b,base)
#define REST_32VRS(n,b,base)	REST_16VRS(n,b,base); REST_16VRS(n+16,b,base)

/* Save the lower 32 VSRs in the thread VSR region */
#define SAVE_VSR(n,b,base)	li b,THREAD_VSR0+(16*(n)); STXVD2X(n,base,b)
#define SAVE_2VSRS(n,b,base)	SAVE_VSR(n,b,base); SAVE_VSR(n+1,b,base)
#define SAVE_4VSRS(n,b,base)	SAVE_2VSRS(n,b,base); SAVE_2VSRS(n+2,b,base)
#define SAVE_8VSRS(n,b,base)	SAVE_4VSRS(n,b,base); SAVE_4VSRS(n+4,b,base)
#define SAVE_16VSRS(n,b,base)	SAVE_8VSRS(n,b,base); SAVE_8VSRS(n+8,b,base)
#define SAVE_32VSRS(n,b,base)	SAVE_16VSRS(n,b,base); SAVE_16VSRS(n+16,b,base)
#define REST_VSR(n,b,base)	li b,THREAD_VSR0+(16*(n)); LXVD2X(n,base,b)
#define REST_2VSRS(n,b,base)	REST_VSR(n,b,base); REST_VSR(n+1,b,base)
#define REST_4VSRS(n,b,base)	REST_2VSRS(n,b,base); REST_2VSRS(n+2,b,base)
#define REST_8VSRS(n,b,base)	REST_4VSRS(n,b,base); REST_4VSRS(n+4,b,base)
#define REST_16VSRS(n,b,base)	REST_8VSRS(n,b,base); REST_8VSRS(n+8,b,base)
#define REST_32VSRS(n,b,base)	REST_16VSRS(n,b,base); REST_16VSRS(n+16,b,base)
/* Save the upper 32 VSRs (32-63) in the thread VSX region (0-31) */
#define SAVE_VSRU(n,b,base)	li b,THREAD_VR0+(16*(n)); STXVD2X(n+32,base,b)
#define SAVE_2VSRSU(n,b,base)	SAVE_VSRU(n,b,base); SAVE_VSRU(n+1,b,base)
#define SAVE_4VSRSU(n,b,base)	SAVE_2VSRSU(n,b,base); SAVE_2VSRSU(n+2,b,base)
#define SAVE_8VSRSU(n,b,base)	SAVE_4VSRSU(n,b,base); SAVE_4VSRSU(n+4,b,base)
#define SAVE_16VSRSU(n,b,base)	SAVE_8VSRSU(n,b,base); SAVE_8VSRSU(n+8,b,base)
#define SAVE_32VSRSU(n,b,base)	SAVE_16VSRSU(n,b,base); SAVE_16VSRSU(n+16,b,base)
#define REST_VSRU(n,b,base)	li b,THREAD_VR0+(16*(n)); LXVD2X(n+32,base,b)
#define REST_2VSRSU(n,b,base)	REST_VSRU(n,b,base); REST_VSRU(n+1,b,base)
#define REST_4VSRSU(n,b,base)	REST_2VSRSU(n,b,base); REST_2VSRSU(n+2,b,base)
#define REST_8VSRSU(n,b,base)	REST_4VSRSU(n,b,base); REST_4VSRSU(n+4,b,base)
#define REST_16VSRSU(n,b,base)	REST_8VSRSU(n,b,base); REST_8VSRSU(n+8,b,base)
#define REST_32VSRSU(n,b,base)	REST_16VSRSU(n,b,base); REST_16VSRSU(n+16,b,base)
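
/*
 * Clarifying note (added): VSRs 32-63 architecturally overlay the 32
 * AltiVec registers, which is why the *VSRSU macros address the save
 * area via THREAD_VR0 rather than THREAD_VSR0.
 */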

/*
 * b = base register for addressing, o = base offset from register of 1st EVR
 * n = first EVR, s = scratch
 */
#define SAVE_EVR(n,s,b,o)	evmergehi s,s,n; stw s,o+4*(n)(b)
#define SAVE_2EVRS(n,s,b,o)	SAVE_EVR(n,s,b,o); SAVE_EVR(n+1,s,b,o)
#define SAVE_4EVRS(n,s,b,o)	SAVE_2EVRS(n,s,b,o); SAVE_2EVRS(n+2,s,b,o)
#define SAVE_8EVRS(n,s,b,o)	SAVE_4EVRS(n,s,b,o); SAVE_4EVRS(n+4,s,b,o)
#define SAVE_16EVRS(n,s,b,o)	SAVE_8EVRS(n,s,b,o); SAVE_8EVRS(n+8,s,b,o)
#define SAVE_32EVRS(n,s,b,o)	SAVE_16EVRS(n,s,b,o); SAVE_16EVRS(n+16,s,b,o)
#define REST_EVR(n,s,b,o)	lwz s,o+4*(n)(b); evmergelo n,s,n
#define REST_2EVRS(n,s,b,o)	REST_EVR(n,s,b,o); REST_EVR(n+1,s,b,o)
#define REST_4EVRS(n,s,b,o)	REST_2EVRS(n,s,b,o); REST_2EVRS(n+2,s,b,o)
#define REST_8EVRS(n,s,b,o)	REST_4EVRS(n,s,b,o); REST_4EVRS(n+4,s,b,o)
#define REST_16EVRS(n,s,b,o)	REST_8EVRS(n,s,b,o); REST_8EVRS(n+8,s,b,o)
#define REST_32EVRS(n,s,b,o)	REST_16EVRS(n,s,b,o); REST_16EVRS(n+16,s,b,o)
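
/*
 * Illustrative note (added): on SPE parts the GPRs are 64 bits wide but
 * stw stores only the low word, so SAVE_EVR first copies the high half
 * of register n into the low half of scratch register s with evmergehi
 * and then stores that; the low halves are covered by the ordinary GPR
 * saves.  REST_EVR reverses this with lwz followed by evmergelo.
 */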

/* Macros to adjust thread priority for hardware multithreading */
#define HMT_VERY_LOW	or	31,31,31	# very low priority
#define HMT_LOW		or	1,1,1
#define HMT_MEDIUM_LOW	or	6,6,6		# medium low priority
#define HMT_MEDIUM	or	2,2,2
#define HMT_MEDIUM_HIGH	or	5,5,5		# medium high priority
#define HMT_HIGH	or	3,3,3
#define HMT_EXTRA_HIGH	or	7,7,7		# power7 only
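
/*
 * Usage sketch (added for illustration): busy-wait loops typically drop
 * thread priority while spinning and restore it once the condition is
 * met, along the lines of (register and label choices hypothetical):
 *
 * 1:	HMT_LOW
 *	lwz	r5,0(r3)	# poll a flag word
 *	cmpwi	r5,0
 *	beq	1b
 *	HMT_MEDIUM
 */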

#ifdef __KERNEL__
#ifdef CONFIG_PPC64

#define XGLUE(a,b) a##b
#define GLUE(a,b) XGLUE(a,b)

#define _GLOBAL(name) \
	.section ".text"; \
	.align 2 ; \
	.globl name; \
	.globl GLUE(.,name); \
	.section ".opd","aw"; \
name: \
	.quad GLUE(.,name); \
	.quad .TOC.@tocbase; \
	.quad 0; \
	.previous; \
	.type GLUE(.,name),@function; \
GLUE(.,name):

#define _INIT_GLOBAL(name) \
	__REF; \
	.align 2 ; \
	.globl name; \
	.globl GLUE(.,name); \
	.section ".opd","aw"; \
name: \
	.quad GLUE(.,name); \
	.quad .TOC.@tocbase; \
	.quad 0; \
	.previous; \
	.type GLUE(.,name),@function; \
GLUE(.,name):

#define _KPROBE(name) \
	.section ".kprobes.text","a"; \
	.align 2 ; \
	.globl name; \
	.globl GLUE(.,name); \
	.section ".opd","aw"; \
name: \
	.quad GLUE(.,name); \
	.quad .TOC.@tocbase; \
	.quad 0; \
	.previous; \
	.type GLUE(.,name),@function; \
GLUE(.,name):

#define _STATIC(name) \
	.section ".text"; \
	.align 2 ; \
	.section ".opd","aw"; \
name: \
	.quad GLUE(.,name); \
	.quad .TOC.@tocbase; \
	.quad 0; \
	.previous; \
	.type GLUE(.,name),@function; \
GLUE(.,name):

#define _INIT_STATIC(name) \
	__REF; \
	.align 2 ; \
	.section ".opd","aw"; \
name: \
	.quad GLUE(.,name); \
	.quad .TOC.@tocbase; \
	.quad 0; \
	.previous; \
	.type GLUE(.,name),@function; \
GLUE(.,name):

#else /* 32-bit */

#define _ENTRY(n)	\
	.globl n;	\
n:

#define _GLOBAL(n)	\
	.text;		\
	.stabs __stringify(n:F-1),N_FUN,0,0,n;\
	.globl n;	\
n:

#define _KPROBE(n)	\
	.section ".kprobes.text","a";	\
	.globl	n;	\
n:

#endif
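
/*
 * Illustrative note (added): on 64-bit (ELFv1 ABI), _GLOBAL(foo) emits a
 * function descriptor "foo" in .opd and places the code at ".foo", so
 * assembly callers branch to the dot symbol, e.g. (hypothetical name):
 *
 * _GLOBAL(my_func)
 *	blr
 *
 * elsewhere:	bl	.my_func
 *
 * On 32-bit, _GLOBAL(foo) simply defines the global label "foo".
 */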

/*
 * LOAD_REG_IMMEDIATE(rn, expr)
 *   Loads the value of the constant expression 'expr' into register 'rn'
 *   using immediate instructions only.  Use this when it's important not
 *   to reference other data (i.e. on ppc64 when the TOC pointer is not
 *   valid) and when 'expr' is a constant or absolute address.
 *
 * LOAD_REG_ADDR(rn, name)
 *   Loads the address of label 'name' into register 'rn'.  Use this when
 *   you don't particularly need immediate instructions only, but you need
 *   the whole address in one register (e.g. it's a structure address and
 *   you want to access various offsets within it).  On ppc32 this is
 *   identical to LOAD_REG_IMMEDIATE.
 *
 * LOAD_REG_ADDRBASE(rn, name)
 * ADDROFF(name)
 *   LOAD_REG_ADDRBASE loads part of the address of label 'name' into
 *   register 'rn'.  ADDROFF(name) returns the remainder of the address as
 *   a constant expression.  ADDROFF(name) is a signed expression < 16 bits
 *   in size, so is suitable for use directly as an offset in load and store
 *   instructions.  Use this when loading/storing a single word or less as:
 *      LOAD_REG_ADDRBASE(rX, name)
 *      ld	rY,ADDROFF(name)(rX)
 */
#ifdef __powerpc64__
#define LOAD_REG_IMMEDIATE(reg,expr)		\
	lis	(reg),(expr)@highest;		\
	ori	(reg),(reg),(expr)@higher;	\
	rldicr	(reg),(reg),32,31;		\
	oris	(reg),(reg),(expr)@h;		\
	ori	(reg),(reg),(expr)@l;
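
/*
 * Worked example (added): building the 64-bit constant
 * 0x123456789abcdef0 in r5 with LOAD_REG_IMMEDIATE expands to
 *	lis	r5,0x1234	# bits 63-48
 *	ori	r5,r5,0x5678	# bits 47-32
 *	rldicr	r5,r5,32,31	# shift them into the upper half
 *	oris	r5,r5,0x9abc	# bits 31-16
 *	ori	r5,r5,0xdef0	# bits 15-0
 */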

#define LOAD_REG_ADDR(reg,name)			\
	ld	(reg),name@got(r2)

#define LOAD_REG_ADDRBASE(reg,name)	LOAD_REG_ADDR(reg,name)
#define ADDROFF(name)			0

/* offsets for stack frame layout */
#define LRSAVE	16

#else /* 32-bit */

#define LOAD_REG_IMMEDIATE(reg,expr)		\
	lis	(reg),(expr)@ha;		\
	addi	(reg),(reg),(expr)@l;

#define LOAD_REG_ADDR(reg,name)		LOAD_REG_IMMEDIATE(reg, name)

#define LOAD_REG_ADDRBASE(reg, name)	lis	(reg),name@ha
#define ADDROFF(name)			name@l

/* offsets for stack frame layout */
#define LRSAVE	4

#endif

/* various errata or part fixups */
#ifdef CONFIG_PPC601_SYNC_FIX
#define SYNC				\
BEGIN_FTR_SECTION			\
	sync;				\
	isync;				\
END_FTR_SECTION_IFSET(CPU_FTR_601)
#define SYNC_601			\
BEGIN_FTR_SECTION			\
	sync;				\
END_FTR_SECTION_IFSET(CPU_FTR_601)
#define ISYNC_601			\
BEGIN_FTR_SECTION			\
	isync;				\
END_FTR_SECTION_IFSET(CPU_FTR_601)
#else
#define	SYNC
#define SYNC_601
#define ISYNC_601
#endif

#ifdef CONFIG_PPC_CELL
#define MFTB(dest)			\
90:	mftb  dest;			\
BEGIN_FTR_SECTION_NESTED(96);		\
	cmpwi dest,0;			\
	beq-  90b;			\
END_FTR_SECTION_NESTED(CPU_FTR_CELL_TB_BUG, CPU_FTR_CELL_TB_BUG, 96)
#else
#define MFTB(dest)			mftb dest
#endif

#ifndef CONFIG_SMP
#define TLBSYNC
#else /* CONFIG_SMP */
/* tlbsync is not implemented on 601 */
#define TLBSYNC				\
BEGIN_FTR_SECTION			\
	tlbsync;			\
	sync;				\
END_FTR_SECTION_IFCLR(CPU_FTR_601)
#endif

/*
 * This instruction is not implemented on the PPC 603 or 601; however, on
 * the 403GCX and 405GP tlbia IS defined and tlbie is not.
 * All of these instructions exist in the 8xx, they have magical powers,
 * and they must be used.
 */

#if !defined(CONFIG_4xx) && !defined(CONFIG_8xx)
#define tlbia					\
	li	r4,1024;			\
	mtctr	r4;				\
	lis	r4,KERNELBASE@h;		\
0:	tlbie	r4;				\
	addi	r4,r4,0x1000;			\
	bdnz	0b
#endif

#ifdef CONFIG_IBM440EP_ERR42
#define PPC440EP_ERR42 isync
#else
#define PPC440EP_ERR42
#endif

/*
 * toreal/fromreal/tophys/tovirt macros. 32-bit BookE makes them
 * keep the address intact to be compatible with code shared with
 * 32-bit classic.
 *
 * On the other hand, I find it useful to have them behave as expected
 * by their name (ie always do the addition) on 64-bit BookE
 */
#if defined(CONFIG_BOOKE) && !defined(CONFIG_PPC64)
#define toreal(rd)
#define fromreal(rd)

/*
 * We use addis to ensure compatibility with the "classic" ppc versions of
 * these macros, which use rs = 0 to get the tophys offset in rd, rather than
 * converting the address in r0, and so this version has to do that too
 * (i.e. set register rd to 0 when rs == 0).
 */
#define tophys(rd,rs)				\
	addis	rd,rs,0

#define tovirt(rd,rs)				\
	addis	rd,rs,0

#elif defined(CONFIG_PPC64)
#define toreal(rd)		/* we can access c000... in real mode */
#define fromreal(rd)

#define tophys(rd,rs)				\
	clrldi	rd,rs,2

#define tovirt(rd,rs)				\
	rotldi	rd,rs,16;			\
	ori	rd,rd,((KERNELBASE>>48)&0xFFFF);\
	rotldi	rd,rd,48
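
/*
 * Worked example (added): with the kernel linear mapping based at
 * KERNELBASE = 0xc000000000000000, tophys(r3,r3) clears the top two
 * address bits, e.g. 0xc000000000002000 -> 0x0000000000002000, while
 * tovirt rotates the 0xc000 back into the top 16 bits.
 */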
#else
/*
 * On APUS (Amiga PowerPC cpu upgrade board), we don't know the
 * physical base address of RAM at compile time.
 */
#define toreal(rd)	tophys(rd,rd)
#define fromreal(rd)	tovirt(rd,rd)

#define tophys(rd,rs)				\
0:	addis	rd,rs,-PAGE_OFFSET@h;		\
	.section ".vtop_fixup","aw";		\
	.align	1;				\
	.long	0b;				\
	.previous

#define tovirt(rd,rs)				\
0:	addis	rd,rs,PAGE_OFFSET@h;		\
	.section ".ptov_fixup","aw";		\
	.align	1;				\
	.long	0b;				\
	.previous
#endif

#ifdef CONFIG_PPC_BOOK3S_64
#define RFI		rfid
#define MTMSRD(r)	mtmsrd	r
#else
#define FIX_SRR1(ra, rb)
#ifndef CONFIG_40x
#define	RFI		rfi
#else
#define RFI		rfi; b .	/* Prevent prefetch past rfi */
#endif
#define MTMSRD(r)	mtmsr	r
#define CLR_TOP32(r)
#endif

#endif /* __KERNEL__ */

/* The boring bits... */

/* Condition Register Bit Fields */

#define	cr0	0
#define	cr1	1
#define	cr2	2
#define	cr3	3
#define	cr4	4
#define	cr5	5
#define	cr6	6
#define	cr7	7

/* General Purpose Registers (GPRs) */

#define	r0	0
#define	r1	1
#define	r2	2
#define	r3	3
#define	r4	4
#define	r5	5
#define	r6	6
#define	r7	7
#define	r8	8
#define	r9	9
#define	r10	10
#define	r11	11
#define	r12	12
#define	r13	13
#define	r14	14
#define	r15	15
#define	r16	16
#define	r17	17
#define	r18	18
#define	r19	19
#define	r20	20
#define	r21	21
#define	r22	22
#define	r23	23
#define	r24	24
#define	r25	25
#define	r26	26
#define	r27	27
#define	r28	28
#define	r29	29
#define	r30	30
#define	r31	31

/* Floating Point Registers (FPRs) */

#define	fr0	0
#define	fr1	1
#define	fr2	2
#define	fr3	3
#define	fr4	4
#define	fr5	5
#define	fr6	6
#define	fr7	7
#define	fr8	8
#define	fr9	9
#define	fr10	10
#define	fr11	11
#define	fr12	12
#define	fr13	13
#define	fr14	14
#define	fr15	15
#define	fr16	16
#define	fr17	17
#define	fr18	18
#define	fr19	19
#define	fr20	20
#define	fr21	21
#define	fr22	22
#define	fr23	23
#define	fr24	24
#define	fr25	25
#define	fr26	26
#define	fr27	27
#define	fr28	28
#define	fr29	29
#define	fr30	30
#define	fr31	31

/* AltiVec Registers (VPRs) */

#define	vr0	0
#define	vr1	1
#define	vr2	2
#define	vr3	3
#define	vr4	4
#define	vr5	5
#define	vr6	6
#define	vr7	7
#define	vr8	8
#define	vr9	9
#define	vr10	10
#define	vr11	11
#define	vr12	12
#define	vr13	13
#define	vr14	14
#define	vr15	15
#define	vr16	16
#define	vr17	17
#define	vr18	18
#define	vr19	19
#define	vr20	20
#define	vr21	21
#define	vr22	22
#define	vr23	23
#define	vr24	24
#define	vr25	25
#define	vr26	26
#define	vr27	27
#define	vr28	28
#define	vr29	29
#define	vr30	30
#define	vr31	31

/* VSX Registers (VSRs) */

#define	vsr0	0
#define	vsr1	1
#define	vsr2	2
#define	vsr3	3
#define	vsr4	4
#define	vsr5	5
#define	vsr6	6
#define	vsr7	7
#define	vsr8	8
#define	vsr9	9
#define	vsr10	10
#define	vsr11	11
#define	vsr12	12
#define	vsr13	13
#define	vsr14	14
#define	vsr15	15
#define	vsr16	16
#define	vsr17	17
#define	vsr18	18
#define	vsr19	19
#define	vsr20	20
#define	vsr21	21
#define	vsr22	22
#define	vsr23	23
#define	vsr24	24
#define	vsr25	25
#define	vsr26	26
#define	vsr27	27
#define	vsr28	28
#define	vsr29	29
#define	vsr30	30
#define	vsr31	31
#define	vsr32	32
#define	vsr33	33
#define	vsr34	34
#define	vsr35	35
#define	vsr36	36
#define	vsr37	37
#define	vsr38	38
#define	vsr39	39
#define	vsr40	40
#define	vsr41	41
#define	vsr42	42
#define	vsr43	43
#define	vsr44	44
#define	vsr45	45
#define	vsr46	46
#define	vsr47	47
#define	vsr48	48
#define	vsr49	49
#define	vsr50	50
#define	vsr51	51
#define	vsr52	52
#define	vsr53	53
#define	vsr54	54
#define	vsr55	55
#define	vsr56	56
#define	vsr57	57
#define	vsr58	58
#define	vsr59	59
#define	vsr60	60
#define	vsr61	61
#define	vsr62	62
#define	vsr63	63

/* SPE Registers (EVPRs) */

#define	evr0	0
#define	evr1	1
#define	evr2	2
#define	evr3	3
#define	evr4	4
#define	evr5	5
#define	evr6	6
#define	evr7	7
#define	evr8	8
#define	evr9	9
#define	evr10	10
#define	evr11	11
#define	evr12	12
#define	evr13	13
#define	evr14	14
#define	evr15	15
#define	evr16	16
#define	evr17	17
#define	evr18	18
#define	evr19	19
#define	evr20	20
#define	evr21	21
#define	evr22	22
#define	evr23	23
#define	evr24	24
#define	evr25	25
#define	evr26	26
#define	evr27	27
#define	evr28	28
#define	evr29	29
#define	evr30	30
#define	evr31	31

/* some stab codes */
#define N_FUN	36
#define N_RSYM	64
#define N_SLINE	68
#define N_SO	100

#endif /*  __ASSEMBLY__ */

#endif /* _ASM_POWERPC_PPC_ASM_H */