3 * Purpose: Machine check handling specific defines
5 * Copyright (C) 1999 Silicon Graphics, Inc.
6 * Copyright (C) Vijay Chander <vijay@engr.sgi.com>
7 * Copyright (C) Srinivasa Thirumalachar <sprasad@engr.sgi.com>
8 * Copyright (C) 2000 Hewlett-Packard Co.
9 * Copyright (C) 2000 David Mosberger-Tang <davidm@hpl.hp.com>
10 * Copyright (C) 2002 Intel Corp.
11 * Copyright (C) 2002 Jenna Hall <jenna.s.hall@intel.com>
12 * Copyright (C) 2005 Silicon Graphics, Inc
13 * Copyright (C) 2005 Keith Owens <kaos@sgi.com>
15 #ifndef _ASM_IA64_MCA_ASM_H
16 #define _ASM_IA64_MCA_ASM_H
18 #include <asm/percpu.h>
29 * This macro converts an instruction virtual address to a physical address
30 * Right now for simulation purposes the virtual addresses are
31 * direct mapped to physical addresses.
32 * 1. Lop off bits 61 thru 63 in the virtual address
/* Identity mapping: clear the region bits (61-63) of an instruction VA. */
#define INST_VA_TO_PA(vaddr) \
	dep	vaddr = 0, vaddr, 61, 3
37 * This macro converts a data virtual address to a physical address
38 * Right now for simulation purposes the virtual addresses are
39 * direct mapped to physical addresses.
40 * 1. Lop off bits 61 thru 63 in the virtual address
/*
 * Body restored: lop off the region bits (61-63) of the data virtual
 * address, per step 1 of the comment above and mirroring INST_VA_TO_PA.
 * (As seen here the define had a dangling continuation and no body.)
 */
#define DATA_VA_TO_PA(addr) \
	dep	addr = 0, addr, 61, 3
45 * This macro converts a data physical address to a virtual address
46 * Right now for simulation purposes the virtual addresses are
47 * direct mapped to physical addresses.
48 * 1. Put 0x7 in bits 61 thru 63.
/*
 * Convert an identity-mapped data physical address to a virtual address
 * by putting 0x7 into bits 61-63, per step 1 of the comment above.
 * "temp" is a scratch register and is clobbered.
 * (Fix: the visible body deposited "temp" without ever loading it; the
 * missing "mov temp = 0x7" is restored here.)
 */
#define DATA_PA_TO_VA(addr,temp) \
	mov	temp = 0x7;; \
	dep	addr = temp, addr, 61, 3
/*
 * Load into "out" the address of this CPU's instance of per-CPU
 * variable "sym": the per-CPU data base is read from the kernel
 * register, then the variable's per-CPU offset is added.
 */
#define GET_THIS_PADDR(out, sym) \
	mov	out = IA64_KR(PER_CPU_DATA);; \
	addl	out = THIS_CPU(sym), out
59 * This macro jumps to the instruction at the given virtual address
60 * and starts execution in physical mode with all the address
61 * translations turned off.
62 * 1. Save the current psr
63 * 2. Make sure that all the upper 32 bits are off
65 * 3. Clear the interrupt enable and interrupt state collection bits
66 * in the psr before updating the ipsr and iip.
68 * 4. Turn off the instruction, data and rse translation bits of the psr
69 * and store the new value into ipsr
70 * Also make sure that the interrupts are disabled.
71 * Ensure that we are in little endian mode.
72 * [psr.{rt, it, dt, i, be} = 0]
74 * 5. Get the physical address corresponding to the virtual address
75 * of the next instruction bundle and put it in iip.
76 * (Using the magic numbers 24 and 40 in the deposit instruction since
77 * the IA64_SDK code directly maps the lower 24 bits as the physical address
78 * from a virtual address).
80 * 6. Do an rfi to move the values from ipsr to psr and iip to ip.
/*
 * NOTE(review): implements the physical-mode entry steps documented in
 * the comment above.  The body appears truncated in this view: there is
 * no visible read of psr into temp2 before it is masked below, and the
 * serialization/rfi of step 6 is not visible -- confirm against the
 * complete file before relying on this macro.
 */
#define PHYSICAL_MODE_ENTER(temp1, temp2, start_addr, old_psr) \
	dep	old_psr = 0, old_psr, 32, 32; /* step 2: keep only psr bits 0-31 */ \
	mov	temp2 = ar.bspstore; /* move the RSE backing store ... */ \
	DATA_VA_TO_PA(temp2); /* ... to its physical alias */ \
	mov	temp1 = ar.rnat; /* preserve NaT collection across the switch */ \
	mov	ar.bspstore = temp2; \
	mov	ar.rnat = temp1; \
	dep	temp2 = 0, temp2, PSR_IC, 2; /* step 3: clear the ic/i bits */ \
	dep	temp1 = 0, temp1, 32, 32; /* step 4: build the new ipsr value */ \
	dep	temp1 = 0, temp1, PSR_IT, 1; /* instruction translation off */ \
	dep	temp1 = 0, temp1, PSR_DT, 1; /* data translation off */ \
	dep	temp1 = 0, temp1, PSR_RT, 1; /* RSE translation off */ \
	dep	temp1 = 0, temp1, PSR_I, 1; /* interrupts disabled */ \
	dep	temp1 = 0, temp1, PSR_IC, 1; /* interrupt collection off */ \
	dep	temp1 = -1, temp1, PSR_MC, 1; /* set psr.mc */ \
	mov	cr.ipsr = temp1; \
	LOAD_PHYSICAL(p0, temp2, start_addr); /* step 5: physical target address */ \
	mov	cr.iip = temp2; \
141 * This macro jumps to the instruction at the given virtual address
142 * and starts execution in virtual mode with all the address
143 * translations turned on.
144 * 1. Get the old saved psr
146 * 2. Clear the interrupt state collection bit in the current psr.
148 * 3. Set the instruction translation bit back in the old psr
149 * Note we have to do this since we are right now saving only the
150 * lower 32-bits of old psr.(Also the old psr has the data and
151 * rse translation bits on)
153 * 4. Set ipsr to this old_psr with "it" bit set and "bn" = 1.
155 * 5. Reset the current thread pointer (r13).
157 * 6. Set iip to the virtual address of the next instruction bundle.
159 * 7. Do an rfi to move ipsr to psr and iip to ip.
/*
 * NOTE(review): implements the virtual-mode entry steps documented in
 * the comment above.  The body appears truncated in this view: old_psr
 * is copied from temp2 before any visible load of psr into temp2, the
 * value temp2 holds when it is deposited into the PSR_* bits below is
 * not visible (presumably 1, since steps 3-4 say those bits are set --
 * TODO confirm), and the rfi of step 7 is not visible -- check against
 * the complete file before relying on this macro.
 */
#define VIRTUAL_MODE_ENTER(temp1, temp2, start_addr, old_psr) \
	mov	old_psr = temp2; /* step 1: stash the (saved) psr value */ \
	dep	temp2 = 0, temp2, PSR_IC, 2; /* step 2: clear the ic/i bits */ \
	mov	temp2 = ar.bspstore; /* move the RSE backing store ... */ \
	DATA_PA_TO_VA(temp2,temp1); /* ... back to its virtual alias */ \
	mov	temp1 = ar.rnat; /* preserve NaT collection across the switch */ \
	mov	ar.bspstore = temp2; \
	mov	ar.rnat = temp1; \
	mov	temp1 = old_psr; /* steps 3-4: rebuild ipsr from the saved psr */ \
	dep	temp1 = temp2, temp1, PSR_IC, 1; \
	dep	temp1 = temp2, temp1, PSR_IT, 1; \
	dep	temp1 = temp2, temp1, PSR_DT, 1; \
	dep	temp1 = temp2, temp1, PSR_RT, 1; \
	dep	temp1 = temp2, temp1, PSR_BN, 1; /* step 4: "bn" = 1 */ \
	mov	cr.ipsr = temp1; \
	movl	temp2 = start_addr; /* step 6: virtual address of the target */ \
	mov	cr.iip = temp2; \
	DATA_PA_TO_VA(sp, temp1); /* switch sp back to its virtual alias */ \
215 * The MCA and INIT stacks in struct ia64_mca_cpu look like normal kernel
216 * stacks, except that the SAL/OS state and a switch_stack are stored near the
217 * top of the MCA/INIT stack. To support concurrent entry to MCA or INIT, as
218 * well as MCA over INIT, each event needs its own SAL/OS state. All entries
219 * are 16 byte aligned.
221 * +---------------------------+
223 * +---------------------------+
225 * +---------------------------+
227 * +---------------------------+
228 * | 16 byte scratch area |
229 * +---------------------------+ <-------- SP at start of C MCA handler
231 * +---------------------------+
232 * | RBS for MCA/INIT handler |
233 * +---------------------------+
234 * | struct task for MCA/INIT |
235 * +---------------------------+ <-------- Bottom of MCA/INIT stack
/* Offsets into the MCA/INIT stacks; see the layout diagram above. */
#define ALIGN16(x) ((x)&~15)	/* round down to a 16-byte boundary */
#define MCA_PT_REGS_OFFSET ALIGN16(KERNEL_STACK_SIZE-IA64_PT_REGS_SIZE)	/* pt_regs near the top of the stack */
#define MCA_SWITCH_STACK_OFFSET ALIGN16(MCA_PT_REGS_OFFSET-IA64_SWITCH_STACK_SIZE)	/* switch_stack below pt_regs */
#define MCA_SOS_OFFSET ALIGN16(MCA_SWITCH_STACK_OFFSET-IA64_SAL_OS_STATE_SIZE)	/* SAL/OS state below switch_stack */
#define MCA_SP_OFFSET ALIGN16(MCA_SOS_OFFSET-16)	/* sp at C handler entry: 16-byte scratch below SOS */
244 #endif /* _ASM_IA64_MCA_ASM_H */