/*
 * Register definitions for the Hexagon architecture
 *
 * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */
21 #ifndef _ASM_REGISTERS_H
22 #define _ASM_REGISTERS_H
/*  See kernel/entry.S for further documentation.  */
/*
 * Entry code copies the event record out of guest registers into
 * this structure (which is on the stack).
 */
/*
 * VM event record, filled in by the event entry code.  Field order is
 * significant: the entry code stores these words in memory order, and the
 * pt_* accessor macros below index them by name.
 */
struct hvm_event_record {
	unsigned long vmel;	/* Event Linkage (return address) */
	unsigned long vmest;	/* Event context - pre-event SSR values */
	unsigned long vmpsp;	/* Previous stack pointer */
	unsigned long vmbadva;	/* Bad virtual address for addressing events */
};
/*
 * NOTE(review): the members below belong to struct pt_regs; the struct's
 * opening brace and the surrounding register-file layout are outside this
 * view of the file.
 */
43 long restart_r0
; /* R0 checkpoint for syscall restart */
44 long syscall_nr
; /* Only used in system calls */
50 long long int predsusr
; /* presumably a combined view of the predicate regs + USR — TODO confirm against the full struct */
/*
 * Be extremely careful with rearranging these, if at all.  Some code
 * assumes the 32 registers exist exactly like this in memory;
 * e.g. kernel/ptrace.c
 * e.g. kernel/signal.c (restore_sigcontext)
 */
198 /* VM dispatch pushes event record onto stack - we can build on it */
199 struct hvm_event_record hvmer
; /* saved VM event record; member of an enclosing struct (presumably pt_regs) whose boundaries are not in view */
/*  Defines to conveniently access the values  */
/*
 * As of the VM spec 0.5, these registers are now set/retrieved via a
 * VM call.  On the in-bound side, we just fetch the values
 * at the entry points and stuff them into the old record in pt_regs.
 * However, on the outbound side, probably at VM rte, we set the
 * registers from the record into the VM call arguments.
 */
/* Guest return address (event linkage register) from the saved record. */
#define pt_elr(regs) ((regs)->hvmer.vmel)
#define pt_set_elr(regs, val) ((regs)->hvmer.vmel = (val))
/* Event cause bits extracted from the saved VM event status (vmest). */
#define pt_cause(regs) ((regs)->hvmer.vmest & (HVM_VMEST_CAUSE_MSK))
/* Nonzero if the event was taken while the guest was in user mode. */
#define user_mode(regs) \
	(((regs)->hvmer.vmest & (HVM_VMEST_UM_MSK << HVM_VMEST_UM_SFT)) != 0)
/* Nonzero if interrupts were enabled when the event was taken. */
#define ints_enabled(regs) \
	(((regs)->hvmer.vmest & (HVM_VMEST_IE_MSK << HVM_VMEST_IE_SFT)) != 0)
/* Previous (pre-event) stack pointer. */
#define pt_psp(regs) ((regs)->hvmer.vmpsp)
/* Faulting virtual address for addressing events. */
#define pt_badva(regs) ((regs)->hvmer.vmbadva)
/*
 * Set up stack pointers for return-from-event: record the resume-time
 * stack pointer in the event record, and point the saved SP slot at the
 * event record itself (which lives on the stack — see the comment on
 * struct hvm_event_record).  do/while(0) so the macro behaves as a
 * single statement after a semicolon.
 */
#define pt_set_rte_sp(regs, sp) do {\
	pt_psp(regs) = (sp);\
	(regs)->SP = (unsigned long) &((regs)->hvmer);\
	} while (0)
/*
 * Mark the saved state as kernel mode: UM bit clear, interrupts enabled.
 * Note this assignment overwrites vmest entirely, not just the mode bits.
 */
#define pt_set_kmode(regs) \
	(regs)->hvmer.vmest = (HVM_VMEST_IE_MSK << HVM_VMEST_IE_SFT)
/*
 * Mark the saved state as user mode with interrupts enabled.  Like
 * pt_set_kmode, this overwrites all other bits previously held in vmest.
 */
#define pt_set_usermode(regs) \
	(regs)->hvmer.vmest = (HVM_VMEST_UM_MSK << HVM_VMEST_UM_SFT) \
		| (HVM_VMEST_IE_MSK << HVM_VMEST_IE_SFT)
234 #endif /* ifndef __ASSEMBLY */