/*
 * Register definitions for the Hexagon architecture
 */

#ifndef _ASM_REGISTERS_H
#define _ASM_REGISTERS_H

#ifndef __ASSEMBLY__

/* See kernel/entry.S for further documentation. */

/*
 * Entry code copies the event record out of guest registers into
 * this structure (which is on the stack).
 */

struct hvm_event_record {
        unsigned long vmel;     /* Event Linkage (return address) */
        unsigned long vmest;    /* Event context - pre-event SSR values */
        unsigned long vmpsp;    /* Previous stack pointer */
        unsigned long vmbadva;  /* Bad virtual address for addressing events */
};

struct pt_regs {
        long restart_r0;        /* R0 checkpoint for syscall restart */
        long syscall_nr;        /* Only used in system calls */
        union {
                struct {
                        unsigned long usr;
                        unsigned long preds;
                };
                long long int predsusr;
        };
        union {
                struct {
                        unsigned long m0;
                        unsigned long m1;
                };
                long long int m1m0;
        };
        union {
                struct {
                        unsigned long sa1;
                        unsigned long lc1;
                };
                long long int lc1sa1;
        };
        union {
                struct {
                        unsigned long sa0;
                        unsigned long lc0;
                };
                long long int lc0sa0;
        };
        union {
                struct {
                        unsigned long ugp;
                        unsigned long gp;
                };
                long long int gpugp;
        };
        union {
                struct {
                        unsigned long cs0;
                        unsigned long cs1;
                };
                long long int cs1cs0;
        };
        /*
         * Be extremely careful with rearranging these, if at all. Some code
         * assumes the 32 registers exist exactly like this in memory;
         * e.g. kernel/ptrace.c
         * e.g. kernel/signal.c (restore_sigcontext)
         */
        union {
                struct {
                        unsigned long r00;
                        unsigned long r01;
                };
                long long int r0100;
        };
        union {
                struct {
                        unsigned long r02;
                        unsigned long r03;
                };
                long long int r0302;
        };
        union {
                struct {
                        unsigned long r04;
                        unsigned long r05;
                };
                long long int r0504;
        };
        union {
                struct {
                        unsigned long r06;
                        unsigned long r07;
                };
                long long int r0706;
        };
        union {
                struct {
                        unsigned long r08;
                        unsigned long r09;
                };
                long long int r0908;
        };
        union {
                struct {
                        unsigned long r10;
                        unsigned long r11;
                };
                long long int r1110;
        };
        union {
                struct {
                        unsigned long r12;
                        unsigned long r13;
                };
                long long int r1312;
        };
        union {
                struct {
                        unsigned long r14;
                        unsigned long r15;
                };
                long long int r1514;
        };
        union {
                struct {
                        unsigned long r16;
                        unsigned long r17;
                };
                long long int r1716;
        };
        union {
                struct {
                        unsigned long r18;
                        unsigned long r19;
                };
                long long int r1918;
        };
        union {
                struct {
                        unsigned long r20;
                        unsigned long r21;
                };
                long long int r2120;
        };
        union {
                struct {
                        unsigned long r22;
                        unsigned long r23;
                };
                long long int r2322;
        };
        union {
                struct {
                        unsigned long r24;
                        unsigned long r25;
                };
                long long int r2524;
        };
        union {
                struct {
                        unsigned long r26;
                        unsigned long r27;
                };
                long long int r2726;
        };
        union {
                struct {
                        unsigned long r28;
                        unsigned long r29;
                };
                long long int r2928;
        };
        union {
                struct {
                        unsigned long r30;
                        unsigned long r31;
                };
                long long int r3130;
        };
        /* VM dispatch pushes event record onto stack - we can build on it */
        struct hvm_event_record hvmer;
};
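
/*
 * Illustrative sketch, not part of the original header: each anonymous
 * union above overlays a pair of 32-bit registers with one 64-bit
 * field, so save/restore code can move a register pair with a single
 * double-word memory access. show_pair_aliasing() is a hypothetical
 * helper added purely for illustration; Hexagon is little-endian, so
 * r00 occupies the low word of r0100 and r01 the high word.
 */
static inline long long show_pair_aliasing(const struct pt_regs *regs)
{
        /* The 64-bit view names the same storage as the two 32-bit halves. */
        return regs->r0100;     /* == ((long long)regs->r01 << 32) | regs->r00 */
}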

/* Defines to conveniently access the values */

/*
 * As of the VM spec 0.5, these registers are now set/retrieved via a
 * VM call. On the in-bound side, we just fetch the values
 * at the entry points and stuff them into the old record in pt_regs.
 * However, on the outbound side, probably at VM rte, we set the
 * registers back.
 */

#define pt_elr(regs) ((regs)->hvmer.vmel)
#define pt_set_elr(regs, val) ((regs)->hvmer.vmel = (val))
#define pt_cause(regs) ((regs)->hvmer.vmest & (HVM_VMEST_CAUSE_MSK))
#define user_mode(regs) \
        (((regs)->hvmer.vmest & (HVM_VMEST_UM_MSK << HVM_VMEST_UM_SFT)) != 0)
#define ints_enabled(regs) \
        (((regs)->hvmer.vmest & (HVM_VMEST_IE_MSK << HVM_VMEST_IE_SFT)) != 0)
#define pt_psp(regs) ((regs)->hvmer.vmpsp)
#define pt_badva(regs) ((regs)->hvmer.vmbadva)
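
/*
 * Hypothetical usage sketch, not taken from the original file: fault
 * handling code would normally consume the event record through the
 * accessors above rather than reading hvmer directly. The helper name
 * fault_badva_if_user() is invented for illustration, and it assumes
 * the HVM_VMEST_* masks are visible from the VM support headers.
 */
static inline unsigned long fault_badva_if_user(struct pt_regs *regs)
{
        /* Report the faulting virtual address only for user-mode events. */
        return user_mode(regs) ? pt_badva(regs) : 0;
}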

#define pt_set_singlestep(regs) ((regs)->hvmer.vmest |= (1<<HVM_VMEST_SS_SFT))
#define pt_clr_singlestep(regs) ((regs)->hvmer.vmest &= ~(1<<HVM_VMEST_SS_SFT))

#define pt_set_rte_sp(regs, sp) do {\
        pt_psp(regs) = (regs)->r29 = (sp);\
        } while (0)

#define pt_set_kmode(regs) \
        (regs)->hvmer.vmest = (HVM_VMEST_IE_MSK << HVM_VMEST_IE_SFT)

#define pt_set_usermode(regs) \
        (regs)->hvmer.vmest = (HVM_VMEST_UM_MSK << HVM_VMEST_UM_SFT) \
                            | (HVM_VMEST_IE_MSK << HVM_VMEST_IE_SFT)
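
/*
 * Hypothetical sketch of how the mode and stack macros compose, not
 * part of the original header: preparing a pt_regs for return into a
 * fresh user context. start_user_context() and its pc/usp parameters
 * are invented names for illustration only. Note that pt_set_rte_sp()
 * is a do-while block because it must update vmpsp and r29 as one
 * statement.
 */
static inline void start_user_context(struct pt_regs *regs,
                                      unsigned long pc, unsigned long usp)
{
        pt_set_usermode(regs);          /* UM set, interrupts enabled */
        pt_set_elr(regs, pc);           /* address the VM rte returns to */
        pt_set_rte_sp(regs, usp);       /* updates vmpsp and r29 together */
}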

#endif /* ifndef __ASSEMBLY */

#endif