make vfs & filesystems use failable copying
[minix3.git] / kernel / arch / earm / arch_system.c
blob 9399c2e4cc14c92a03e469fd8439cddd4b6e7d0c
/* system dependent functions for use inside the whole kernel. */

#include "kernel/kernel.h"

#include <unistd.h>
#include <ctype.h>
#include <string.h>
#include <minix/cpufeature.h>
#include <assert.h>
#include <signal.h>
#include <machine/vm.h>
#include <machine/signal.h>
#include <arm/armreg.h>

#include <minix/u64.h>

#include "archconst.h"
#include "arch_proto.h"
#include "kernel/proc.h"
#include "kernel/debug.h"
#include "ccnt.h"
#include "bsp_init.h"
#include "bsp_serial.h"

#include "glo.h"
void * k_stacks;
/* FPU state handling is not implemented for this architecture; stubs only. */
void fpu_init(void)
{
}

void save_local_fpu(struct proc *pr, int retain)
{
}

void save_fpu(struct proc *pr)
{
}
void arch_proc_reset(struct proc *pr)
{
	assert(pr->p_nr < NR_PROCS);

	/* Clear process state. */
	memset(&pr->p_reg, 0, sizeof(pr->p_reg));
	if(iskerneln(pr->p_nr)) {
		pr->p_reg.psr = INIT_TASK_PSR;
	} else {
		pr->p_reg.psr = INIT_PSR;
	}
}
void arch_proc_setcontext(struct proc *p, struct stackframe_s *state,
	int isuser, int trapstyle)
{
	assert(sizeof(p->p_reg) == sizeof(*state));
	if(state != &p->p_reg) {
		memcpy(&p->p_reg, state, sizeof(*state));
	}

	/* Further code is instructed not to touch the context
	 * any more.
	 */
	p->p_misc_flags |= MF_CONTEXT_SET;

	if(!(p->p_rts_flags)) {
		printf("WARNING: setting full context of runnable process\n");
		print_proc(p);
		util_stacktrace();
	}
}
void arch_set_secondary_ipc_return(struct proc *p, u32_t val)
{
	p->p_reg.r1 = val;
}
int restore_fpu(struct proc *pr)
{
	return 0;
}
void cpu_identify(void)
{
	u32_t midr;
	unsigned cpu = cpuid;

	asm volatile("mrc p15, 0, %[midr], c0, c0, 0 @ read MIDR\n\t"
			: [midr] "=r" (midr));

	cpu_info[cpu].implementer = midr >> 24;
	cpu_info[cpu].variant = (midr >> 20) & 0xF;
	cpu_info[cpu].arch = (midr >> 16) & 0xF;
	cpu_info[cpu].part = (midr >> 4) & 0xFFF;
	cpu_info[cpu].revision = midr & 0xF;
	cpu_info[cpu].freq = 660; /* 660 MHz hardcoded */
}
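/*
 * Added note, not in the original file: the field extraction above follows the
 * ARMv7 MIDR layout, with the implementer in bits [31:24], variant in [23:20],
 * architecture in [19:16], primary part number in [15:4] and revision in [3:0].
 * As a concrete example, a Cortex-A8 reports implementer 0x41 ('A', ARM Ltd.)
 * and part number 0xC08.
 */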
void arch_init(void)
{
	u32_t value;

	k_stacks = (void*) &k_stacks_start;
	assert(!((vir_bytes) k_stacks % K_STACK_SIZE));

#ifndef CONFIG_SMP
	/*
	 * use stack 0 and cpu id 0 on a single processor machine, SMP
	 * configuration does this in smp_init() for all cpus at once
	 */
	tss_init(0, get_k_stack_top(0));
#endif

	/* enable user space access to cycle counter */
	/* set cycle counter to 0: ARM ARM B4.1.113 and B4.1.117 */
	asm volatile ("MRC p15, 0, %0, c9, c12, 0\t\n": "=r" (value));
	value |= PMU_PMCR_C; /* Reset counter */
	value |= PMU_PMCR_E; /* Enable counter hardware */
	asm volatile ("MCR p15, 0, %0, c9, c12, 0\t\n": : "r" (value));

	/* enable CCNT counting: ARM ARM B4.1.116 */
	value = PMU_PMCNTENSET_C; /* Enable PMCCNTR cycle counter */
	asm volatile ("MCR p15, 0, %0, c9, c12, 1\t\n": : "r" (value));

	/* enable cycle counter in user mode: ARM ARM B4.1.124 */
	value = PMU_PMUSERENR_EN;
	asm volatile ("MCR p15, 0, %0, c9, c14, 0\t\n": : "r" (value));

	bsp_init();
}
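/*
 * Illustrative sketch, not part of the original file: once arch_init() has set
 * PMUSERENR.EN above, unprivileged code can read the cycle counter directly.
 * The helper name read_ccnt() is assumed here for illustration; the counter
 * itself is the ARMv7 PMCCNTR register, read through CP15 c9/c13/0.
 */
static inline u32_t read_ccnt(void)
{
	u32_t ccnt;

	/* read PMCCNTR, the cycle count register */
	asm volatile ("mrc p15, 0, %0, c9, c13, 0" : "=r" (ccnt));
	return ccnt;
}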
/*===========================================================================*
 *				do_ser_debug				     *
 *===========================================================================*/
void do_ser_debug()
{
}
void arch_do_syscall(struct proc *proc)
{
	/* do_ipc assumes that it's running because of the current process */
	assert(proc == get_cpulocal_var(proc_ptr));
	/* Make the system call, for real this time. */
	proc->p_reg.retreg =
		do_ipc(proc->p_reg.retreg, proc->p_reg.r1, proc->p_reg.r2);
}
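/*
 * Added note (assumption, not in the original file): as the call above implies,
 * the IPC primitive arrives in retreg and its two arguments in r1 and r2 of the
 * saved register frame, and the do_ipc() result written back into retreg is
 * what user space sees as the system call's return value.
 */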
reg_t svc_stack;

struct proc * arch_finish_switch_to_user(void)
{
	char * stk;
	struct proc * p;

#ifdef CONFIG_SMP
	stk = (char *)tss[cpuid].sp0;
#else
	stk = (char *)tss[0].sp0;
#endif
	svc_stack = (reg_t)stk;
	/* set pointer to the process to run on the stack */
	p = get_cpulocal_var(proc_ptr);
	*((reg_t *)stk) = (reg_t) p;

	/* turn interrupts on */
	p->p_reg.psr &= ~(PSR_I|PSR_F);

	return p;
}
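/*
 * Added note, not in the original file: clearing PSR_I and PSR_F above does not
 * enable interrupts immediately; it clears the IRQ and FIQ mask bits in the
 * saved program status word, so interrupts become enabled once this context is
 * restored on the way back to user mode.
 */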
void fpu_sigcontext(struct proc *pr, struct sigframe_sigcontext *fr, struct sigcontext *sc)
{
}

reg_t arch_get_sp(struct proc *p) { return p->p_reg.sp; }

void get_randomness(struct k_randomness *rand, int source)
{
}

void arch_ser_init(void)
{
	bsp_ser_init();
}
/*===========================================================================*/
/*			    __switch_address_space			      */
/*===========================================================================*/
/*
 * Set the ttbr register to the supplied value, unless it already holds that
 * value; rewriting it in that case would only cause an extra, unwanted TLB
 * flush.
 */
void __switch_address_space(struct proc *p, struct proc **__ptproc)
{
	reg_t orig_ttbr, new_ttbr;

	new_ttbr = p->p_seg.p_ttbr;
	if (new_ttbr == 0)
		return;

	orig_ttbr = read_ttbr0();

	/*
	 * test if ttbr is loaded with the current value to avoid unnecessary
	 * TLB flushes
	 */
	if (new_ttbr == orig_ttbr)
		return;

	write_ttbr0(new_ttbr);

	*__ptproc = p;

	return;
}
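/*
 * Illustrative sketch, not part of the original file: a caller on the context
 * switch path would be expected to pass the per-CPU slot that records which
 * process owns the currently loaded page table, along the lines of
 *
 *	__switch_address_space(p, get_cpulocal_var_ptr(ptproc));
 *
 * The get_cpulocal_var_ptr()/ptproc names are assumed from the surrounding
 * kernel and are not verified in this file.
 */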