kernel: trap-dependent state restore, trace fixes
[minix.git] / kernel / arch / arm / arch_system.c
/* system dependent functions for use inside the whole kernel. */

#include "kernel/kernel.h"

#include <unistd.h>
#include <ctype.h>
#include <string.h>
#include <minix/cpufeature.h>
#include <assert.h>
#include <signal.h>
#include <machine/vm.h>

#include <minix/u64.h>

#include "archconst.h"
#include "arch_proto.h"
#include "serial.h"
#include "kernel/proc.h"
#include "kernel/debug.h"

#include "glo.h"

void * k_stacks;

static void ser_init(void);
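/* There is no FPU state handling on ARM here: the FPU routines below are
 * no-op stubs so that the machine-independent code can call them
 * unconditionally.
 */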
void fpu_init(void)
{
}

void save_local_fpu(struct proc *pr, int retain)
{
}

void save_fpu(struct proc *pr)
{
}
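/* Reset the process' register frame to a clean state and give it its initial
 * PSR: kernel tasks start with INIT_TASK_PSR, user processes with INIT_PSR.
 */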
void arch_proc_reset(struct proc *pr)
{
	assert(pr->p_nr < NR_PROCS);

	/* Clear process state. */
	memset(&pr->p_reg, 0, sizeof(pr->p_reg));
	if(iskerneln(pr->p_nr))
		pr->p_reg.psr = INIT_TASK_PSR;
	else
		pr->p_reg.psr = INIT_PSR;
}
void arch_proc_setcontext(struct proc *p, struct stackframe_s *state,
	int isuser, int trapstyle)
{
}
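/* The secondary IPC return value is delivered in r1; the primary result
 * travels back in retreg (see arch_do_syscall() below).
 */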
void arch_set_secondary_ipc_return(struct proc *p, u32_t val)
{
	p->p_reg.r1 = val;
}
int restore_fpu(struct proc *pr)
{
	return 0;
}
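/* Read the CP15 Main ID Register (MIDR) and record its implementer, variant,
 * architecture, primary part number and revision fields for this CPU.
 */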
void cpu_identify(void)
{
	u32_t midr;
	unsigned cpu = cpuid;

	asm volatile("mrc p15, 0, %[midr], c0, c0, 0 @ read MIDR\n\t"
		: [midr] "=r" (midr));

	cpu_info[cpu].implementer = midr >> 24;
	cpu_info[cpu].variant = (midr >> 20) & 0xF;
	cpu_info[cpu].arch = (midr >> 16) & 0xF;
	cpu_info[cpu].part = (midr >> 4) & 0xFFF;
	cpu_info[cpu].revision = midr & 0xF;
}
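/* Architecture-specific initialization: point k_stacks at the statically
 * allocated kernel stack area, set up the TSS for CPU 0 on non-SMP builds,
 * and bring up the debug serial line.
 */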
void arch_init(void)
{
	k_stacks = (void*) &k_stacks_start;
	assert(!((vir_bytes) k_stacks % K_STACK_SIZE));

#ifndef CONFIG_SMP
	/*
	 * Use stack 0 and CPU id 0 on a single-processor machine; the SMP
	 * configuration does this in smp_init() for all CPUs at once.
	 */
	tss_init(0, get_k_stack_top(0));
#endif

	ser_init();
}
/*===========================================================================*
 *				do_ser_debug				     *
 *===========================================================================*/
void do_ser_debug()
{
}
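/* Dispatch a system call: hand the IPC arguments that the trap handler saved
 * in the process' register frame to do_ipc(), and store the result in retreg
 * so it reaches user space when the context is restored.
 */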
void arch_do_syscall(struct proc *proc)
{
	/* do_ipc assumes that it's running because of the current process */
	assert(proc == get_cpulocal_var(proc_ptr));
	/* Make the system call, for real this time. */
	proc->p_reg.retreg =
		do_ipc(proc->p_reg.retreg, proc->p_reg.r1, proc->p_reg.r2);
}
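/* Top of the kernel (SVC mode) stack for the CPU that is returning to user
 * space; set by arch_finish_switch_to_user() below.
 */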
reg_t svc_stack;
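/* Last architecture-dependent step before returning to user mode: record the
 * top of this CPU's kernel stack, store a pointer to the process that is
 * about to run at that location, and return that process so its context can
 * be restored.
 */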
struct proc * arch_finish_switch_to_user(void)
{
	char * stk;
	struct proc * p;

#ifdef CONFIG_SMP
	stk = (char *)tss[cpuid].sp0;
#else
	stk = (char *)tss[0].sp0;
#endif
	svc_stack = (reg_t)stk;
	/* set pointer to the process to run on the stack */
	p = get_cpulocal_var(proc_ptr);
	*((reg_t *)stk) = (reg_t) p;

	/* Make sure the I bit is clear in the saved PSR so that interrupts
	 * won't be disabled once p's context is restored; a runnable process
	 * should never have it set.
	 */
	assert(!(p->p_reg.psr & PSR_I));

	return p;
}
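/* The routines below are no-op stubs or trivial accessors on this
 * architecture.
 */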
void fpu_sigcontext(struct proc *pr, struct sigframe *fr, struct sigcontext *sc)
{
}

reg_t arch_get_sp(struct proc *p) { return p->p_reg.sp; }

void get_randomness(struct k_randomness *rand, int source)
{
}

static void ser_init(void)
{
}
/*===========================================================================*/
/*			      __switch_address_space			     */
/*===========================================================================*/
/*
 * Set the TTBR0 register to the supplied value, unless it already holds that
 * value, in which case rewriting it would only cause an extra, undesirable
 * TLB flush.
 */
void __switch_address_space(struct proc *p, struct proc **__ptproc)
{
	reg_t orig_ttbr, new_ttbr;

	new_ttbr = p->p_seg.p_ttbr;
	if (new_ttbr == 0)
		return;

	orig_ttbr = read_ttbr0();

	/*
	 * Test if the TTBR is already loaded with the new value, to avoid
	 * unnecessary TLB flushes.
	 */
	if (new_ttbr == orig_ttbr)
		return;

	refresh_tlb();
	write_ttbr0(new_ttbr);

	/* Record p as the owner of the now-current address space. */
	*__ptproc = p;

	return;
}