/*	$NetBSD: vm_machdep.c,v 1.52 2009/11/26 00:19:12 matt Exp $	*/

/*
 * Copyright (c) 1994-1998 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Brini.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * vm machine specific bits
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vm_machdep.c,v 1.52 2009/11/26 00:19:12 matt Exp $");
49 #include "opt_armfpe.h"
50 #include "opt_pmap_debug.h"
51 #include "opt_perfctrs.h"
52 #include "opt_cputypes.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/pmc.h>
#include <sys/syslog.h>

#include <uvm/uvm_extern.h>

#include <machine/cpu.h>
#include <machine/pmap.h>
#include <machine/reg.h>
#include <machine/vmparam.h>

#ifdef ARMFPE
#include <arm/fpe-arm/armfpe.h>
#endif

extern pv_addr_t systempage;

int process_read_regs(struct proc *p, struct reg *regs);
int process_read_fpregs(struct proc *p, struct fpreg *regs);

void lwp_trampoline(void);

/*
 * Special compilation symbols:
 *
 * STACKCHECKS - Fill undefined and supervisor stacks with a known pattern
 *		 on forking and check the pattern on exit, reporting
 *		 the amount of stack used.
 */

void
cpu_proc_fork(struct proc *p1, struct proc *p2)
{

#if defined(PERFCTRS)
	if (PMC_ENABLED(p1))
		pmc_md_fork(p1, p2);
	else {
		p2->p_md.pmc_enabled = 0;
		p2->p_md.pmc_state = NULL;
	}
#endif
}

void
cpu_setfunc(struct lwp *l, void (*func)(void *), void *arg)
{
	struct pcb *pcb = lwp_getpcb(l);
	struct trapframe *tf = pcb->pcb_tf;
	struct switchframe *sf = (struct switchframe *)tf - 1;

	/*
	 * Build a switchframe immediately below the trapframe so that
	 * the first cpu_switchto() into this LWP resumes in
	 * lwp_trampoline(), which then calls func(arg).
	 */
	sf->sf_r4 = (u_int)func;
	sf->sf_r5 = (u_int)arg;
	sf->sf_sp = (u_int)tf;
	sf->sf_pc = (u_int)lwp_trampoline;
	pcb->pcb_un.un_32.pcb32_sp = (u_int)sf;
}
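
/*
 * Illustrative sketch (not part of the original file): lwp_trampoline is
 * an assembler routine, but the switchframe set up above makes the new
 * LWP start out roughly like the hypothetical C below, after the
 * low-level switch code has finished its bookkeeping (typically via the
 * MI lwp_startup() hook).  The names lwp_trampoline_sketch, r4_func and
 * r5_arg are invented for illustration only; this is never compiled.
 */
#if 0
static void
lwp_trampoline_sketch(void (*r4_func)(void *), void *r5_arg)
{
	/* Hand control to the function stashed in sf_r4 with sf_r5 as
	 * its argument, usually child_return() for a forked child. */
	(*r4_func)(r5_arg);
}
#endif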

/*
 * Finish a fork operation, with LWP l2 nearly set up.
 *
 * Copy and update the pcb and trapframe, making the child ready to run.
 *
 * Rig the child's kernel stack so that it will start out in
 * lwp_trampoline() which will call the specified func with the argument arg.
 *
 * If an alternate user-level stack is requested (with non-zero values
 * in both the stack and stacksize args), set up the user stack pointer
 * accordingly.
 */
void
cpu_lwp_fork(struct lwp *l1, struct lwp *l2, void *stack, size_t stacksize,
    void (*func)(void *), void *arg)
{
	struct pcb *pcb1, *pcb2;
	struct trapframe *tf;
	vaddr_t uv;

	pcb1 = lwp_getpcb(l1);
	pcb2 = lwp_getpcb(l2);

#ifdef PMAP_DEBUG
	if (pmap_debug_level >= 0)
		printf("cpu_lwp_fork: %p %p %p %p\n", l1, l2, curlwp, &lwp0);
#endif	/* PMAP_DEBUG */

	if (l1 == curlwp) {
		/* Sync the PCB before we copy it. */
		savectx(curpcb);
	}

	l2->l_md.md_flags = l1->l_md.md_flags & MDP_VFPUSED;

#ifdef FPU_VFP
	/*
	 * Copy the floating point state from the VFP to the PCB
	 * if this process has state stored there.
	 */
	if (pcb1->pcb_vfpcpu != NULL)
		vfp_saveregs_lwp(l1, 1);
#endif	/* FPU_VFP */

	/* Copy the pcb */
	*pcb2 = *pcb1;

	/*
	 * Set up the kernel stack for the process.
	 * Note: this stack is not in use if we are forking from p1.
	 */
	uv = uvm_lwp_getuarea(l2);
	pcb2->pcb_un.un_32.pcb32_sp = uv + USPACE_SVC_STACK_TOP;

#ifdef STACKCHECKS
	/* Fill the kernel stack with a known pattern */
	memset((void *)(uv + USPACE_SVC_STACK_BOTTOM), 0xdd,
	    (USPACE_SVC_STACK_TOP - USPACE_SVC_STACK_BOTTOM));
#endif	/* STACKCHECKS */

#ifdef PMAP_DEBUG
	if (pmap_debug_level >= 0) {
		printf("l1: pcb=%p pid=%d pmap=%p\n",
		    pcb1, l1->l_lid, l1->l_proc->p_vmspace->vm_map.pmap);
		printf("l2: pcb=%p pid=%d pmap=%p\n",
		    pcb2, l2->l_lid, l2->l_proc->p_vmspace->vm_map.pmap);
	}
#endif	/* PMAP_DEBUG */

#ifdef ARMFPE
	/* Initialise a new FP context for l2 and copy the context from l1 */
	arm_fpe_core_initcontext(FP_CONTEXT(l2));
	arm_fpe_copycontext(FP_CONTEXT(l1), FP_CONTEXT(l2));
#endif	/* ARMFPE */

	tf = (struct trapframe *)pcb2->pcb_un.un_32.pcb32_sp - 1;
	pcb2->pcb_tf = tf;
	*tf = *pcb1->pcb_tf;

	/*
	 * If specified, give the child a different stack.
	 */
	if (stack != NULL)
		tf->tf_usr_sp = (u_int)stack + stacksize;

	cpu_setfunc(l2, func, arg);
}
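
/*
 * Illustrative sketch (not part of the original file): the MI fork path
 * is expected to reach cpu_lwp_fork() with child_return as the start
 * function and the new LWP as its argument, roughly as below.  The
 * wrapper name fork_md_hook_sketch is invented for illustration, and the
 * block is never compiled; the real call chain lives in the MI kern/uvm
 * code, not in this file.
 */
#if 0
static void
fork_md_hook_sketch(struct lwp *l1, struct lwp *l2)
{
	/* Keep the parent's user stack, start the child in child_return(). */
	cpu_lwp_fork(l1, l2, NULL, 0, child_return, l2);
}
#endif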

/*
 * cpu_exit is called as the last action during exit.
 *
 * We clean up a little and then call switch_exit() with the old proc as an
 * argument.  switch_exit() first switches to lwp0's context, and finally
 * jumps into switch() to wait for another process to wake up.
 */

void
cpu_lwp_free(struct lwp *l, int proc)
{
#ifdef ARMFPE
	/* Abort any active FP operation and deactivate the context */
	arm_fpe_core_abort(FP_CONTEXT(l), NULL, NULL);
	arm_fpe_core_changecontext(0);
#endif	/* ARMFPE */

#ifdef FPU_VFP
	struct pcb *pcb = lwp_getpcb(l);

	if (pcb->pcb_vfpcpu != NULL)
		vfp_saveregs_lwp(l, 0);
#endif	/* FPU_VFP */

#ifdef STACKCHECKS
	/* Report how much stack has been used - debugging */
	{
		u_char *ptr;
		int loop;

		ptr = (u_char *)uvm_lwp_getuarea(l) + USPACE_SVC_STACK_BOTTOM;
		for (loop = 0; loop < (USPACE_SVC_STACK_TOP - USPACE_SVC_STACK_BOTTOM)
		    && *ptr == 0xdd; ++loop, ++ptr) ;
		log(LOG_INFO, "%d bytes of svc stack fill pattern\n", loop);
	}
#endif	/* STACKCHECKS */
}

void
cpu_lwp_free2(struct lwp *l)
{
}

/*
 * Map a user I/O request into kernel virtual address space.
 * Note: the pages are already locked by uvm_vslock(), so we
 * do not need to pass an access_type to pmap_enter().
 */
void
vmapbuf(struct buf *bp, vsize_t len)
{
	vaddr_t faddr, taddr, off;
	paddr_t fpa;

#ifdef PMAP_DEBUG
	if (pmap_debug_level >= 0)
		printf("vmapbuf: bp=%08x buf=%08x len=%08x\n", (u_int)bp,
		    (u_int)bp->b_data, (u_int)len);
#endif	/* PMAP_DEBUG */

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vmapbuf");

	bp->b_saveaddr = bp->b_data;
	faddr = trunc_page((vaddr_t)bp->b_data);
	off = (vaddr_t)bp->b_data - faddr;
	len = round_page(off + len);
	taddr = uvm_km_alloc(phys_map, len, 0, UVM_KMF_VAONLY | UVM_KMF_WAITVA);
	bp->b_data = (void *)(taddr + off);

	/*
	 * The region is locked, so we expect that pmap_pte() will return
	 * non-NULL.  Map the user pages one page at a time into the
	 * kernel address range we just allocated.
	 */
	while (len) {
		(void) pmap_extract(vm_map_pmap(&bp->b_proc->p_vmspace->vm_map),
		    faddr, &fpa);
		pmap_enter(pmap_kernel(), taddr, fpa,
		    VM_PROT_READ|VM_PROT_WRITE, VM_PROT_READ|VM_PROT_WRITE|PMAP_WIRED);
		faddr += PAGE_SIZE;
		taddr += PAGE_SIZE;
		len -= PAGE_SIZE;
	}
	pmap_update(pmap_kernel());
}

/*
 * Unmap a previously-mapped user I/O request.
 */
void
vunmapbuf(struct buf *bp, vsize_t len)
{
	vaddr_t addr, off;

#ifdef PMAP_DEBUG
	if (pmap_debug_level >= 0)
		printf("vunmapbuf: bp=%08x buf=%08x len=%08x\n",
		    (u_int)bp, (u_int)bp->b_data, (u_int)len);
#endif	/* PMAP_DEBUG */

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vunmapbuf");

	/*
	 * Make sure the cache does not have dirty data for the
	 * pages we had mapped.
	 */
	addr = trunc_page((vaddr_t)bp->b_data);
	off = (vaddr_t)bp->b_data - addr;
	len = round_page(off + len);

	pmap_remove(pmap_kernel(), addr, addr + len);
	pmap_update(pmap_kernel());
	uvm_km_free(phys_map, addr, len, UVM_KMF_VAONLY);
	bp->b_data = bp->b_saveaddr;
	bp->b_saveaddr = 0;
}
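
/*
 * Illustrative sketch (not part of the original file): vmapbuf() and
 * vunmapbuf() are normally used by the MI raw-I/O path (physio and
 * friends) to make a user buffer visible in kernel virtual space for the
 * duration of a transfer.  The helper below only shows the expected
 * bracketing; raw_io_sketch and do_transfer are invented placeholders
 * and the block is never compiled.
 */
#if 0
static int
raw_io_sketch(struct buf *bp, vsize_t len)
{
	int error;

	vmapbuf(bp, len);		/* map the user pages into phys_map */
	error = do_transfer(bp);	/* device works on bp->b_data */
	vunmapbuf(bp, len);		/* tear the temporary mapping down */

	return error;
}
#endif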

/* End of vm_machdep.c */