arch/hexagon/include/uapi/asm/registers.h
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
 * Register definitions for the Hexagon architecture
 */

#ifndef _ASM_REGISTERS_H
#define _ASM_REGISTERS_H

#ifndef __ASSEMBLY__

/* See kernel/entry.S for further documentation. */

/*
 * Entry code copies the event record out of guest registers into
 * this structure (which is on the stack).
 */
struct hvm_event_record {
	unsigned long vmel;	/* Event Linkage (return address) */
	unsigned long vmest;	/* Event context - pre-event SSR values */
	unsigned long vmpsp;	/* Previous stack pointer */
	unsigned long vmbadva;	/* Bad virtual address for addressing events */
};
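
/*
 * Usage sketch (illustrative only, not part of this header): once the VM
 * dispatch path has filled in a record, the saved state can be read
 * straight from the fields.  example_show_event() is a hypothetical
 * helper and pr_info() is from <linux/printk.h>; the pt_* accessor
 * macros further below are the usual way to reach these fields through
 * struct pt_regs.
 *
 *	static void example_show_event(const struct hvm_event_record *er)
 *	{
 *		unsigned long return_pc = er->vmel;	// event return address
 *		unsigned long fault_va = er->vmbadva;	// set for addressing events
 *
 *		pr_info("event at %#lx, badva %#lx\n", return_pc, fault_va);
 *	}
 */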

struct pt_regs {
	long restart_r0;	/* R0 checkpoint for syscall restart */
	long syscall_nr;	/* Only used in system calls */
	union {
		struct {
			unsigned long usr;
			unsigned long preds;
		};
		long long int predsusr;
	};
	union {
		struct {
			unsigned long m0;
			unsigned long m1;
		};
		long long int m1m0;
	};
	union {
		struct {
			unsigned long sa1;
			unsigned long lc1;
		};
		long long int lc1sa1;
	};
	union {
		struct {
			unsigned long sa0;
			unsigned long lc0;
		};
		long long int lc0sa0;
	};
	union {
		struct {
			unsigned long ugp;
			unsigned long gp;
		};
		long long int gpugp;
	};
	union {
		struct {
			unsigned long cs0;
			unsigned long cs1;
		};
		long long int cs1cs0;
	};
	/*
	 * Be extremely careful with rearranging these, if at all.  Some code
	 * assumes the 32 registers exist exactly like this in memory;
	 * e.g. kernel/ptrace.c
	 * e.g. kernel/signal.c (restore_sigcontext)
	 * See the layout sketch after this structure.
	 */
	union {
		struct {
			unsigned long r00;
			unsigned long r01;
		};
		long long int r0100;
	};
	union {
		struct {
			unsigned long r02;
			unsigned long r03;
		};
		long long int r0302;
	};
	union {
		struct {
			unsigned long r04;
			unsigned long r05;
		};
		long long int r0504;
	};
	union {
		struct {
			unsigned long r06;
			unsigned long r07;
		};
		long long int r0706;
	};
	union {
		struct {
			unsigned long r08;
			unsigned long r09;
		};
		long long int r0908;
	};
	union {
		struct {
			unsigned long r10;
			unsigned long r11;
		};
		long long int r1110;
	};
	union {
		struct {
			unsigned long r12;
			unsigned long r13;
		};
		long long int r1312;
	};
	union {
		struct {
			unsigned long r14;
			unsigned long r15;
		};
		long long int r1514;
	};
	union {
		struct {
			unsigned long r16;
			unsigned long r17;
		};
		long long int r1716;
	};
	union {
		struct {
			unsigned long r18;
			unsigned long r19;
		};
		long long int r1918;
	};
	union {
		struct {
			unsigned long r20;
			unsigned long r21;
		};
		long long int r2120;
	};
	union {
		struct {
			unsigned long r22;
			unsigned long r23;
		};
		long long int r2322;
	};
	union {
		struct {
			unsigned long r24;
			unsigned long r25;
		};
		long long int r2524;
	};
	union {
		struct {
			unsigned long r26;
			unsigned long r27;
		};
		long long int r2726;
	};
	union {
		struct {
			unsigned long r28;
			unsigned long r29;
		};
		long long int r2928;
	};
	union {
		struct {
			unsigned long r30;
			unsigned long r31;
		};
		long long int r3130;
	};
	/* VM dispatch pushes event record onto stack - we can build on it */
	struct hvm_event_record hvmer;
};
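
/*
 * Layout sketch (illustrative only, not part of this header): each
 * anonymous union above aliases a pair of 32-bit registers with one
 * 64-bit value (e.g. r00/r01 overlay r0100), and r00..r31 sit back to
 * back in memory, which is the assumption called out above for
 * kernel/ptrace.c and kernel/signal.c.  example_copy_gprs() is a
 * hypothetical helper.
 *
 *	static void example_copy_gprs(const struct pt_regs *regs,
 *				      unsigned long gprs[32])
 *	{
 *		const unsigned long *src = &regs->r00;
 *		int i;
 *
 *		// r00..r31 are contiguous words, so a flat copy is valid
 *		for (i = 0; i < 32; i++)
 *			gprs[i] = src[i];
 *	}
 */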

/* Defines to conveniently access the values */

/*
 * As of the VM spec 0.5, these registers are now set/retrieved via a
 * VM call.  On the in-bound side, we just fetch the values
 * at the entry points and stuff them into the old record in pt_regs.
 * However, on the outbound side, probably at VM rte, we set the
 * registers back.
 */

#define pt_elr(regs) ((regs)->hvmer.vmel)
#define pt_set_elr(regs, val) ((regs)->hvmer.vmel = (val))
#define pt_cause(regs) ((regs)->hvmer.vmest & (HVM_VMEST_CAUSE_MSK))
#define user_mode(regs) \
	(((regs)->hvmer.vmest & (HVM_VMEST_UM_MSK << HVM_VMEST_UM_SFT)) != 0)
#define ints_enabled(regs) \
	(((regs)->hvmer.vmest & (HVM_VMEST_IE_MSK << HVM_VMEST_IE_SFT)) != 0)
#define pt_psp(regs) ((regs)->hvmer.vmpsp)
#define pt_badva(regs) ((regs)->hvmer.vmbadva)

#define pt_set_singlestep(regs) ((regs)->hvmer.vmest |= (1<<HVM_VMEST_SS_SFT))
#define pt_clr_singlestep(regs) ((regs)->hvmer.vmest &= ~(1<<HVM_VMEST_SS_SFT))

#define pt_set_rte_sp(regs, sp) do {\
	pt_psp(regs) = (regs)->r29 = (sp);\
	} while (0)

#define pt_set_kmode(regs) \
	(regs)->hvmer.vmest = (HVM_VMEST_IE_MSK << HVM_VMEST_IE_SFT)

#define pt_set_usermode(regs) \
	(regs)->hvmer.vmest = (HVM_VMEST_UM_MSK << HVM_VMEST_UM_SFT) \
			    | (HVM_VMEST_IE_MSK << HVM_VMEST_IE_SFT)
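
/*
 * Usage sketch (illustrative only, not part of this header): a trap or
 * fault path could use the accessors above roughly as follows.  The
 * HVM_VMEST_* masks come from a separate Hexagon VM header, and
 * example_handle_event(), deliver_user_signal() and kernel_fixup() are
 * hypothetical names.
 *
 *	static void example_handle_event(struct pt_regs *regs)
 *	{
 *		unsigned long pc = pt_elr(regs);	// event return address
 *		unsigned long va = pt_badva(regs);	// faulting virtual address
 *		unsigned long cause = pt_cause(regs);	// HVM_VMEST_CAUSE bits
 *
 *		if (user_mode(regs))
 *			deliver_user_signal(regs, pc, va, cause);
 *		else
 *			kernel_fixup(regs, pc, va, cause);
 *	}
 */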

#endif  /* __ASSEMBLY__ */

#endif  /* _ASM_REGISTERS_H */