#include "kernel/kernel.h"
#include "kernel/proc.h"

#include <machine/vm.h>

#include <minix/type.h>
#include <minix/syslib.h>
#include <minix/cpufeature.h>

#include <string.h>
#include <assert.h>

#include "arch_proto.h"
#include "kernel/proto.h"
#include "kernel/debug.h"
#include "omap_timer.h"
phys_bytes device_mem_vaddr = 0;

#define HASPT(procptr) ((procptr)->p_seg.p_ttbr != 0)
#define MAXFREEPDES 2
static int nfreepdes = 0;
static int freepdes[MAXFREEPDES];

static u32_t phys_get32(phys_bytes v);

vir_bytes omap3_gptimer10_base = OMAP3_GPTIMER10_BASE;
void mem_clear_mapcache(void)
{
	int i;
	for(i = 0; i < nfreepdes; i++) {
		struct proc *ptproc = get_cpulocal_var(ptproc);
		int pde = freepdes[i];
		u32_t *ptv;
		assert(ptproc);
		ptv = ptproc->p_seg.p_ttbr_v;
		assert(ptv);
		ptv[pde] = 0;
	}
}
/* This function sets up a mapping from within the kernel's address
 * space to any other area of memory, either straight physical
 * memory (pr == NULL) or a process view of memory, in 1MB windows.
 * I.e., it maps in 1MB chunks of virtual (or physical) address space
 * to 1MB chunks of kernel virtual address space.
 *
 * It recognizes pr already being in memory as a special case (no
 * mapping required).
 *
 * The target (i.e. in-kernel) mapping area is one of the freepdes[]
 * slots that VM has previously told the kernel is available. It is
 * identified by the 'pde' parameter. This value can be chosen freely
 * by the caller, as long as it is in range (i.e. 0 or higher and corresponds
 * to a known freepde slot). It is up to the caller to keep track of which
 * freepdes are in use, and to determine which ones are free to use.
 *
 * The logical number supplied by the caller is translated into an actual
 * pde number to be used, and a pointer to it (linear address) is returned
 * for actual use by phys_copy or memset.
 */
static phys_bytes createpde(
	const struct proc *pr,	/* Requested process, NULL for physical. */
	const phys_bytes linaddr,/* Address after segment translation. */
	phys_bytes *bytes,	/* Size of chunk, function may truncate it. */
	int free_pde_idx,	/* Index of the free slot to use. */
	int *changed		/* If mapping is made, this is set to 1. */
	)
{
	u32_t pdeval;
	phys_bytes offset;
	int pde;

	assert(free_pde_idx >= 0 && free_pde_idx < nfreepdes);
	pde = freepdes[free_pde_idx];
	assert(pde >= 0 && pde < 4096);
	if(pr && ((pr == get_cpulocal_var(ptproc)) || iskernelp(pr))) {
		/* Process memory is requested, and
		 * it's a process that is already in the current page table, or
		 * the kernel, which is always there.
		 * Therefore linaddr is valid directly, with the requested
		 * size.
		 */
		return linaddr;
	}

	if(pr) {
		/* Requested address is in a process that is not currently
		 * accessible directly. Grab the PDE entry of that process'
		 * page table that corresponds to the requested address.
		 */
		assert(pr->p_seg.p_ttbr_v);
		pdeval = pr->p_seg.p_ttbr_v[ARM_VM_PDE(linaddr)];
	} else {
		/* Requested address is physical. Make up the PDE entry. */
		pdeval = (linaddr & ARM_VM_SECTION_MASK)
			| ARM_VM_SECTION
			| ARM_VM_SECTION_DOMAIN
			| ARM_VM_SECTION_CACHED
			| ARM_VM_SECTION_USER;
	}
	/* Write the pde value that we need into a pde that the kernel
	 * can access, into the currently loaded page table so it becomes
	 * visible.
	 */
	assert(get_cpulocal_var(ptproc)->p_seg.p_ttbr_v);
	if(get_cpulocal_var(ptproc)->p_seg.p_ttbr_v[pde] != pdeval) {
		get_cpulocal_var(ptproc)->p_seg.p_ttbr_v[pde] = pdeval;
		*changed = 1;
	}
	/* Memory is now available, but only the 1MB window of virtual
	 * address space that we have mapped; calculate how much of
	 * the requested range is visible and return that in *bytes,
	 * if that is less than the requested range.
	 */
	offset = linaddr & ARM_VM_OFFSET_MASK_1MB; /* Offset in 1MB window. */
	*bytes = MIN(*bytes, ARM_BIG_PAGE_SIZE - offset);

	/* Return the linear address of the start of the new mapping. */
	return ARM_BIG_PAGE_SIZE*pde + offset;
}
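
/*
 * A usage sketch, assuming hypothetical inputs 'src_phys' and 'bytes':
 * map a physical source range into freepde slot 0 and access it through
 * the kernel window.
 *
 *	phys_bytes chunk = bytes, win;
 *	int changed = 0;
 *	win = createpde(NULL, src_phys, &chunk, 0, &changed);
 *	if(changed)
 *		reload_ttbr0();
 *	... 'win' is now valid for the first 'chunk' bytes of the range ...
 */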
/*===========================================================================*
 *				check_resumed_caller			     *
 *===========================================================================*/
static int check_resumed_caller(struct proc *caller)
{
	/* Returns the result from VM if caller was resumed, otherwise OK. */
	if (caller && (caller->p_misc_flags & MF_KCALL_RESUME)) {
		assert(caller->p_vmrequest.vmresult != VMSUSPEND);
		return caller->p_vmrequest.vmresult;
	}

	return OK;
}
/*===========================================================================*
 *				lin_lin_copy				     *
 *===========================================================================*/
static int lin_lin_copy(struct proc *srcproc, vir_bytes srclinaddr,
	struct proc *dstproc, vir_bytes dstlinaddr, vir_bytes bytes)
{
	phys_bytes addr;
	int procslot;
	assert(get_cpulocal_var(ptproc));
	assert(get_cpulocal_var(proc_ptr));
	assert(read_ttbr0() == get_cpulocal_var(ptproc)->p_seg.p_ttbr);

	procslot = get_cpulocal_var(ptproc)->p_nr;

	assert(procslot >= 0 && procslot < ARM_VM_DIR_ENTRIES);

	if(srcproc) assert(!RTS_ISSET(srcproc, RTS_SLOT_FREE));
	if(dstproc) assert(!RTS_ISSET(dstproc, RTS_SLOT_FREE));
	assert(!RTS_ISSET(get_cpulocal_var(ptproc), RTS_SLOT_FREE));
	assert(get_cpulocal_var(ptproc)->p_seg.p_ttbr_v);
	if(srcproc) assert(!RTS_ISSET(srcproc, RTS_VMINHIBIT));
	if(dstproc) assert(!RTS_ISSET(dstproc, RTS_VMINHIBIT));

	while(bytes > 0) {
		phys_bytes srcptr, dstptr;
		vir_bytes chunk = bytes;
		int changed = 0;

#ifdef CONFIG_SMP
		unsigned cpu = cpuid;

		if (srcproc && GET_BIT(srcproc->p_stale_tlb, cpu)) {
			changed = 1;
			UNSET_BIT(srcproc->p_stale_tlb, cpu);
		}
		if (dstproc && GET_BIT(dstproc->p_stale_tlb, cpu)) {
			changed = 1;
			UNSET_BIT(dstproc->p_stale_tlb, cpu);
		}
#endif
		/* Set up 1MB ranges. */
		srcptr = createpde(srcproc, srclinaddr, &chunk, 0, &changed);
		dstptr = createpde(dstproc, dstlinaddr, &chunk, 1, &changed);
		if(changed)
			reload_ttbr0();

		/* Copy pages. */
		PHYS_COPY_CATCH(srcptr, dstptr, chunk, addr);
		if(addr) {
			/* If addr is nonzero, a page fault was caught.
			 *
			 * phys_copy does all memory accesses word-aligned
			 * (rounded down), so pagefaults can occur at a lower
			 * address than the specified offsets. Compute the
			 * lower bounds for the sanity check.
			 */
			vir_bytes src_aligned = srcptr & ~0x3,
				dst_aligned = dstptr & ~0x3;

			if(addr >= src_aligned && addr < (srcptr + chunk)) {
				return EFAULT_SRC;
			}
			if(addr >= dst_aligned && addr < (dstptr + chunk)) {
				return EFAULT_DST;
			}

			panic("lin_lin_copy fault out of range");
		}
		/* Update counter and addresses for next iteration, if any. */
		bytes -= chunk;
		srclinaddr += chunk;
		dstlinaddr += chunk;
	}
	if(srcproc) assert(!RTS_ISSET(srcproc, RTS_SLOT_FREE));
	if(dstproc) assert(!RTS_ISSET(dstproc, RTS_SLOT_FREE));
	assert(!RTS_ISSET(get_cpulocal_var(ptproc), RTS_SLOT_FREE));
	assert(get_cpulocal_var(ptproc)->p_seg.p_ttbr_v);

	return OK;
}
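
/*
 * A minimal usage sketch (hypothetical): copy 'len' bytes from physical
 * memory into a process' virtual address space. EFAULT_SRC/EFAULT_DST tell
 * the caller which side faulted, so it can ask VM for help:
 *
 *	r = lin_lin_copy(NULL, src_phys, dstproc, dst_lin, len);
 *	if(r == EFAULT_DST)
 *		vm_suspend(caller, dstproc, dst_lin, len, VMSTYPE_KERNELCALL);
 */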
static u32_t phys_get32(phys_bytes addr)
{
	u32_t v;
	int r;

	if((r=lin_lin_copy(NULL, addr,
		proc_addr(SYSTEM), (phys_bytes) &v, sizeof(v))) != OK) {
		panic("lin_lin_copy for phys_get32 failed: %d", r);
	}

	return v;
}
/*===========================================================================*
 *				umap_virtual				     *
 *===========================================================================*/
phys_bytes umap_virtual(rp, seg, vir_addr, bytes)
register struct proc *rp;	/* pointer to proc table entry for process */
int seg;			/* T, D, or S segment */
vir_bytes vir_addr;		/* virtual address in bytes within the seg */
vir_bytes bytes;		/* # of bytes to be copied */
{
	phys_bytes phys = 0;
	if(vm_lookup(rp, vir_addr, &phys, NULL) != OK) {
		printf("SYSTEM:umap_virtual: vm_lookup of %s: seg 0x%x: 0x%lx failed\n",
			rp->p_name, seg, vir_addr);
		phys = 0;
	} else {
		if(phys == 0)
			panic("vm_lookup returned phys: %d", phys);
	}

	if(phys == 0) {
		printf("SYSTEM:umap_virtual: lookup failed\n");
		return 0;
	}
	/* Now make sure addresses are contiguous in physical memory
	 * so that the umap makes sense.
	 */
	if(bytes > 0 && vm_lookup_range(rp, vir_addr, NULL, bytes) != bytes) {
		printf("umap_virtual: %s: %lu at 0x%lx (vir 0x%lx) not contiguous\n",
			rp->p_name, bytes, vir_addr, vir_addr);
		return 0;
	}

	/* phys must be larger than 0 (or the caller will think the call
	 * failed), and address must not cross a page boundary.
	 */
	assert(phys);

	return phys;
}
/*===========================================================================*
 *				vm_lookup				     *
 *===========================================================================*/
int vm_lookup(const struct proc *proc, const vir_bytes virtual,
	phys_bytes *physical, u32_t *ptent)
{
	u32_t *root, *pt;
	int pde, pte;
	u32_t pde_v, pte_v;

	assert(proc);
	assert(physical);
	assert(!isemptyp(proc));
	assert(HASPT(proc));
	/* Retrieve page directory entry. */
	root = (u32_t *) proc->p_seg.p_ttbr;
	assert(!((u32_t) root % ARM_PAGEDIR_SIZE));
	pde = ARM_VM_PDE(virtual);
	assert(pde >= 0 && pde < ARM_VM_DIR_ENTRIES);
	pde_v = phys_get32((u32_t) (root + pde));

	if(!(pde_v & ARM_VM_PDE_PRESENT)) {
		return EFAULT;
	}
	/* We don't expect to ever see this.
	 * LSC Impossible with the previous test.
	if(pde_v & ARM_VM_BIGPAGE) {
		*physical = pde_v & ARM_VM_SECTION_MASK;
		if(ptent) *ptent = pde_v;
		*physical += virtual & ARM_VM_OFFSET_MASK_1MB;
	} else */
	{
		/* Retrieve page table entry. */
		pt = (u32_t *) (pde_v & ARM_VM_PDE_MASK);
		assert(!((u32_t) pt % ARM_PAGETABLE_SIZE));
		pte = ARM_VM_PTE(virtual);
		assert(pte >= 0 && pte < ARM_VM_PT_ENTRIES);
		pte_v = phys_get32((u32_t) (pt + pte));
		if(!(pte_v & ARM_VM_PTE_PRESENT)) {
			return EFAULT;
		}

		if(ptent) *ptent = pte_v;

		/* Actual address now known; retrieve it and add page offset. */
		*physical = pte_v & ARM_VM_PTE_MASK;
		*physical += virtual % ARM_PAGE_SIZE;
	}

	return OK;
}
/*===========================================================================*
 *				vm_lookup_range				     *
 *===========================================================================*/
size_t vm_lookup_range(const struct proc *proc, vir_bytes vir_addr,
	phys_bytes *phys_addr, size_t bytes)
{
	/* Look up the physical address corresponding to linear virtual address
	 * 'vir_addr' for process 'proc'. Return the size of the range covered
	 * by contiguous physical memory starting from that address; this may
	 * be anywhere between 0 and 'bytes' inclusive. If the return value is
	 * nonzero, and 'phys_addr' is non-NULL, 'phys_addr' will be set to the
	 * base physical address of the range. 'vir_addr' and 'bytes' need not
	 * be page-aligned, but the caller must have verified that the given
	 * linear range is valid for the given process at all.
	 */
	phys_bytes phys, next_phys;
	size_t len;

	assert(proc);
	assert(bytes > 0);
	assert(HASPT(proc));
	/* Look up the first page. */
	if (vm_lookup(proc, vir_addr, &phys, NULL) != OK)
		return 0;

	if (phys_addr != NULL)
		*phys_addr = phys;

	len = ARM_PAGE_SIZE - (vir_addr % ARM_PAGE_SIZE);
	vir_addr += len;
	next_phys = phys + len;
	/* Look up any next pages and test physical contiguity. */
	while (len < bytes) {
		if (vm_lookup(proc, vir_addr, &phys, NULL) != OK)
			break;

		if (next_phys != phys)
			break;

		len += ARM_PAGE_SIZE;
		vir_addr += ARM_PAGE_SIZE;
		next_phys += ARM_PAGE_SIZE;
	}

	/* We might now have overshot the requested length somewhat. */
	return MIN(bytes, len);
}
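
/*
 * A usage sketch (hypothetical): verify that a user buffer is physically
 * contiguous before handing its physical address to a device:
 *
 *	if(vm_lookup_range(rp, buf_vir, &buf_phys, buf_len) != buf_len)
 *		return EFAULT;	... not fully mapped, or not contiguous ...
 */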
/*===========================================================================*
 *				vm_suspend				     *
 *===========================================================================*/
static void vm_suspend(struct proc *caller, const struct proc *target,
	const vir_bytes linaddr, const vir_bytes len, const int type)
{
	/* This range is not OK for this process. Set parameters
	 * of the request and notify VM about the pending request.
	 */
	assert(!RTS_ISSET(caller, RTS_VMREQUEST));
	assert(!RTS_ISSET(target, RTS_VMREQUEST));

	RTS_SET(caller, RTS_VMREQUEST);
	caller->p_vmrequest.req_type = VMPTYPE_CHECK;
	caller->p_vmrequest.target = target->p_endpoint;
	caller->p_vmrequest.params.check.start = linaddr;
	caller->p_vmrequest.params.check.length = len;
	caller->p_vmrequest.params.check.writeflag = 1;
	caller->p_vmrequest.type = type;

	/* Connect caller on vmrequest wait queue. */
	if(!(caller->p_vmrequest.nextrequestor = vmrequest))
		if(OK != send_sig(VM_PROC_NR, SIGKMEM))
			panic("send_sig failed");
	vmrequest = caller;
}
/*===========================================================================*
 *				vm_check_range				     *
 *===========================================================================*/
int vm_check_range(struct proc *caller, struct proc *target,
	vir_bytes vir_addr, size_t bytes)
{
	/* Public interface to vm_suspend(), for use by kernel calls. On behalf
	 * of 'caller', call into VM to check the linear virtual address range
	 * of process 'target', starting at 'vir_addr', for 'bytes' bytes. This
	 * function assumes that it will be called twice if VM returned an
	 * error the first time (since nothing has changed in that case), and
	 * will then return the error code resulting from the first call. Upon
	 * the first call, a non-success error code is returned as well.
	 */
	int r;

	if ((caller->p_misc_flags & MF_KCALL_RESUME) &&
			(r = caller->p_vmrequest.vmresult) != OK)
		return r;

	vm_suspend(caller, target, vir_addr, bytes, VMSTYPE_KERNELCALL);

	return VMSUSPEND;
}
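
/*
 * A usage sketch (hypothetical): a kernel call handler that hits a fault
 * while copying can hand the range to VM and suspend; on resumption the
 * MF_KCALL_RESUME check above returns VM's verdict from the first call:
 *
 *	if(data_copy(src_e, src, dst_e, dst, len) == EFAULT)
 *		return vm_check_range(caller, target, dst, len);
 */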
/*===========================================================================*
 *				delivermsg				     *
 *===========================================================================*/
void delivermsg(struct proc *rp)
{
	int r = OK;

	assert(rp->p_misc_flags & MF_DELIVERMSG);
	assert(rp->p_delivermsg.m_source != NONE);

	if (copy_msg_to_user(&rp->p_delivermsg,
			(message *) rp->p_delivermsg_vir)) {
		printf("WARNING wrong user pointer 0x%08lx from "
			"process %s / %d\n",
			rp->p_delivermsg_vir,
			rp->p_name,
			rp->p_endpoint);
		r = EFAULT;
	}

	/* Indicate message has been delivered; address is 'used'. */
	rp->p_delivermsg.m_source = NONE;
	rp->p_misc_flags &= ~MF_DELIVERMSG;

	if(!(rp->p_misc_flags & MF_CONTEXT_SET)) {
		rp->p_reg.retreg = r;
	}
}
/*===========================================================================*
 *				vm_memset				     *
 *===========================================================================*/
int vm_memset(struct proc *caller, endpoint_t who, phys_bytes ph, int c,
	phys_bytes count)
{
	u32_t pattern;
	struct proc *whoptr = NULL;
	phys_bytes cur_ph = ph;
	phys_bytes left = count;
	phys_bytes ptr, chunk, pfa = 0;
	int new_ttbr, r = OK;

	if ((r = check_resumed_caller(caller)) != OK)
		return r;

	/* NONE for physical, otherwise virtual */
	if (who != NONE && !(whoptr = endpoint_lookup(who)))
		return ESRCH;

	c &= 0xFF;
	pattern = c | (c << 8) | (c << 16) | (c << 24);

	assert(get_cpulocal_var(ptproc)->p_seg.p_ttbr_v);
	assert(!catch_pagefaults);
	catch_pagefaults = 1;

	/* We can memset as many bytes as we have remaining,
	 * or as many as remain in the 1MB chunk we mapped in.
	 */
	while (left > 0) {
		new_ttbr = 0;
		chunk = left;
		ptr = createpde(whoptr, cur_ph, &chunk, 0, &new_ttbr);

		if (new_ttbr) {
			reload_ttbr0();
		}

		/* If a page fault happens, pfa is non-null */
		if ((pfa = phys_memset(ptr, pattern, chunk))) {

			/* If a process pagefaults, VM may help out */
			if (whoptr) {
				vm_suspend(caller, whoptr, ph, count,
					VMSTYPE_KERNELCALL);
				assert(catch_pagefaults);
				catch_pagefaults = 0;
				return VMSUSPEND;
			}

			/* Pagefault when phys copying ?! */
			panic("vm_memset: pf %lx addr=%lx len=%lu\n",
				pfa, ptr, chunk);
		}

		cur_ph += chunk;
		left -= chunk;
	}

	assert(get_cpulocal_var(ptproc)->p_seg.p_ttbr_v);
	assert(catch_pagefaults);
	catch_pagefaults = 0;

	return OK;
}
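
/*
 * A usage sketch (hypothetical): zero one page of plain physical memory.
 * Passing who == NONE selects the physical path, so createpde() makes up
 * a section mapping instead of borrowing a process page table entry:
 *
 *	r = vm_memset(caller, NONE, page_phys, 0, ARM_PAGE_SIZE);
 */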
/*===========================================================================*
 *				virtual_copy_f				     *
 *===========================================================================*/
int virtual_copy_f(caller, src_addr, dst_addr, bytes, vmcheck)
struct proc *caller;
struct vir_addr *src_addr;	/* source virtual address */
struct vir_addr *dst_addr;	/* destination virtual address */
vir_bytes bytes;		/* # of bytes to copy */
int vmcheck;			/* if nonzero, can return VMSUSPEND */
{
	/* Copy bytes from virtual address src_addr to virtual address
	 * dst_addr.
	 */
	struct vir_addr *vir_addr[2];	/* virtual source and destination address */
	struct proc *procs[2];
	int i, r;

	assert((vmcheck && caller) || (!vmcheck && !caller));
	/* Check copy count. */
	if (bytes <= 0) return(EDOM);

	/* Do some more checks and map virtual addresses to physical addresses. */
	vir_addr[_SRC_] = src_addr;
	vir_addr[_DST_] = dst_addr;

	for (i = _SRC_; i <= _DST_; i++) {
		endpoint_t proc_e = vir_addr[i]->proc_nr_e;
		int proc_nr;
		struct proc *p;

		if (proc_e == NONE) {
			p = NULL;
		} else {
			if(!isokendpt(proc_e, &proc_nr)) {
				printf("virtual_copy: no reasonable endpoint\n");
				return ESRCH;
			}
			p = proc_addr(proc_nr);
		}

		procs[i] = p;
	}
	if ((r = check_resumed_caller(caller)) != OK)
		return r;

	if((r=lin_lin_copy(procs[_SRC_], vir_addr[_SRC_]->offset,
		procs[_DST_], vir_addr[_DST_]->offset, bytes)) != OK) {
		struct proc *target = NULL;
		phys_bytes lin;

		if(r != EFAULT_SRC && r != EFAULT_DST)
			panic("lin_lin_copy failed: %d", r);
		if(!vmcheck || !caller) {
			return r;
		}

		if(r == EFAULT_SRC) {
			lin = vir_addr[_SRC_]->offset;
			target = procs[_SRC_];
		} else if(r == EFAULT_DST) {
			lin = vir_addr[_DST_]->offset;
			target = procs[_DST_];
		} else {
			panic("r strange: %d", r);
		}

		assert(caller);
		assert(target);

		vm_suspend(caller, target, lin, bytes, VMSTYPE_KERNELCALL);
		return VMSUSPEND;
	}

	return OK;
}
/*===========================================================================*
 *				data_copy				     *
 *===========================================================================*/
int data_copy(const endpoint_t from_proc, const vir_bytes from_addr,
	const endpoint_t to_proc, const vir_bytes to_addr,
	size_t bytes)
{
	struct vir_addr src, dst;

	src.offset = from_addr;
	dst.offset = to_addr;
	src.proc_nr_e = from_proc;
	dst.proc_nr_e = to_proc;
	assert(src.proc_nr_e != NONE);
	assert(dst.proc_nr_e != NONE);

	return virtual_copy(&src, &dst, bytes);
}
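
/*
 * Example (hypothetical endpoint, address, and struct): copy a structure
 * from a user process into a kernel buffer; KERNEL is a valid copy
 * endpoint here:
 *
 *	struct foo f;
 *	if(data_copy(caller_e, user_vaddr, KERNEL, (vir_bytes) &f,
 *		sizeof(f)) != OK)
 *		return EFAULT;
 */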
/*===========================================================================*
 *				data_copy_vmcheck			     *
 *===========================================================================*/
int data_copy_vmcheck(struct proc *caller,
	const endpoint_t from_proc, const vir_bytes from_addr,
	const endpoint_t to_proc, const vir_bytes to_addr,
	size_t bytes)
{
	struct vir_addr src, dst;

	src.offset = from_addr;
	dst.offset = to_addr;
	src.proc_nr_e = from_proc;
	dst.proc_nr_e = to_proc;
	assert(src.proc_nr_e != NONE);
	assert(dst.proc_nr_e != NONE);

	return virtual_copy_vmcheck(caller, &src, &dst, bytes);
}
void memory_init(void)
{
	assert(nfreepdes == 0);

	freepdes[nfreepdes++] = kinfo.freepde_start++;
	freepdes[nfreepdes++] = kinfo.freepde_start++;

	assert(kinfo.freepde_start < ARM_VM_DIR_ENTRIES);
	assert(nfreepdes == 2);
	assert(nfreepdes <= MAXFREEPDES);
}
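
/* The two slots reserved above are the windows that lin_lin_copy() uses
 * as its source (index 0) and destination (index 1), and that vm_memset()
 * uses as its scratch window (index 0).
 */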
/*===========================================================================*
 *				arch_proc_init				     *
 *===========================================================================*/
void arch_proc_init(struct proc *pr, const u32_t ip, const u32_t sp,
	char *name)
{
	arch_proc_reset(pr);
	strcpy(pr->p_name, name);

	/* set custom state we know */
	pr->p_reg.pc = ip;
	pr->p_reg.sp = sp;
}
static int device_mem_mapping_index = -1,
	frclock_index = -1,
	usermapped_glo_index = -1,
	usermapped_index = -1, first_um_idx = -1;

char *device_mem;

extern char usermapped_start, usermapped_end, usermapped_nonglo_start;
int arch_phys_map(const int index,
	phys_bytes *addr,
	phys_bytes *len,
	int *flags)
{
	static int first = 1;
	int freeidx = 0;
	u32_t glo_len = (u32_t) &usermapped_nonglo_start -
		(u32_t) &usermapped_start;

	if(first) {
		memset(&minix_kerninfo, 0, sizeof(minix_kerninfo));
		device_mem_mapping_index = freeidx++;
		frclock_index = freeidx++;
		if(glo_len > 0) {
			usermapped_glo_index = freeidx++;
		}

		usermapped_index = freeidx++;
		first_um_idx = usermapped_index;
		if(usermapped_glo_index != -1)
			first_um_idx = usermapped_glo_index;
		first = 0;
	}
	if(index == usermapped_glo_index) {
		*addr = vir2phys(&usermapped_start);
		*len = glo_len;
		*flags = VMMF_USER | VMMF_GLO;
		return OK;
	}
	else if(index == usermapped_index) {
		*addr = vir2phys(&usermapped_nonglo_start);
		*len = (u32_t) &usermapped_end -
			(u32_t) &usermapped_nonglo_start;
		*flags = VMMF_USER;
		return OK;
	}
	else if (index == device_mem_mapping_index) {
		/* map device memory */
		*addr = 0;
		*len = 0 - KERN_BASE;	/* maximum available */
		*flags = VMMF_UNCACHED | VMMF_WRITE;
		return OK;
	}
	else if (index == frclock_index) {
		*addr = OMAP3_GPTIMER10_BASE;
		*len = ARM_PAGE_SIZE;
		*flags = VMMF_UNCACHED | VMMF_USER | VMMF_GLO;
		return OK;
	}

	return EINVAL;
}
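
/*
 * At boot, VM probes arch_phys_map() with increasing indices to learn
 * which physical ranges the kernel wants mapped, maps each one, and
 * reports the chosen virtual address back via arch_phys_map_reply()
 * below. A simplified, hypothetical sketch of that loop:
 *
 *	for(i = 0; arch_phys_map(i, &addr, &len, &flags) == OK; i++) {
 *		vaddr = map_range_somewhere(addr, len, flags);
 *		arch_phys_map_reply(i, vaddr);
 *	}
 */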
int arch_phys_map_reply(const int index, const vir_bytes addr)
{
	if(index == first_um_idx) {
		u32_t usermapped_offset;
		assert(addr > (u32_t) &usermapped_start);
		usermapped_offset = addr - (u32_t) &usermapped_start;
#define FIXEDPTR(ptr) (void *) ((u32_t)ptr + usermapped_offset)
#define FIXPTR(ptr) ptr = FIXEDPTR(ptr)
#define ASSIGN(minixstruct) minix_kerninfo.minixstruct = FIXEDPTR(&minixstruct)
		ASSIGN(kinfo);
		ASSIGN(machine);
		ASSIGN(kmessages);
		ASSIGN(loadinfo);
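		/* Each ASSIGN(x) above expands to
		 *	minix_kerninfo.x = FIXEDPTR(&x);
		 * i.e. the kernel-linear address of each exported struct is
		 * rebased by usermapped_offset so the stored pointer is valid
		 * in the user-visible mapping of the usermapped region.
		 */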
		/* adjust the pointers of the functions and the struct
		 * itself to the user-accessible mapping
		 */
		minix_kerninfo.kerninfo_magic = KERNINFO_MAGIC;
		minix_kerninfo.minix_feature_flags = minix_feature_flags;
		minix_kerninfo_user = (vir_bytes) FIXEDPTR(&minix_kerninfo);

		return OK;
	}

	if (index == usermapped_index) {
		return OK;
	}
	else if (index == device_mem_mapping_index) {
		device_mem_vaddr = addr;
		return OK;
	}
	else if (index == frclock_index) {
		omap3_gptimer10_base = minix_kerninfo.minix_frclock = addr;
		return OK;
	}

	return EINVAL;
}
int arch_enable_paging(struct proc * caller)
{
	assert(caller->p_seg.p_ttbr);

	/* load caller's page table */
	switch_address_space(caller);

	device_mem = (char *) device_mem_vaddr;

	return OK;
}
void release_address_space(struct proc *pr)
{
	pr->p_seg.p_ttbr_v = NULL;
	barrier();
}