2 #include "kernel/kernel.h"
3 #include "kernel/proc.h"
6 #include <machine/vm.h>
8 #include <minix/type.h>
9 #include <minix/board.h>
10 #include <minix/syslib.h>
11 #include <minix/cpufeature.h>
17 #include <machine/vm.h>
19 #include "arch_proto.h"
20 #include "kernel/proto.h"
21 #include "kernel/debug.h"
22 #include "bsp_timer.h"
#define HASPT(procptr) ((procptr)->p_seg.p_ttbr != 0)

#define MAXFREEPDES	2
static int nfreepdes = 0;
static int freepdes[MAXFREEPDES];

static u32_t phys_get32(phys_bytes v);

/* list of requested physical mappings */
static kern_phys_map *kern_phys_map_head;
void mem_clear_mapcache(void)
{
	int i;

	for(i = 0; i < nfreepdes; i++) {
		struct proc *ptproc = get_cpulocal_var(ptproc);
		int pde = freepdes[i];
		u32_t *ptv;
		assert(ptproc);
		ptv = ptproc->p_seg.p_ttbr_v;
		assert(ptv);
		ptv[pde] = 0;
	}
}
/* This function sets up a mapping from within the kernel's address
 * space to any other area of memory, either straight physical
 * memory (pr == NULL) or a process view of memory, in 1MB windows.
 * I.e., it maps in 1MB chunks of virtual (or physical) address space
 * to 1MB chunks of kernel virtual address space.
 *
 * It recognizes pr already being in memory as a special case (no
 * mapping required).
 *
 * The target (i.e. in-kernel) mapping area is one of the freepdes[]
 * VM has earlier already told the kernel about that is available. It is
 * identified as the 'pde' parameter. This value can be chosen freely
 * by the caller, as long as it is in range (i.e. 0 or higher and corresponds
 * to a known freepde slot). It is up to the caller to keep track of which
 * freepdes are in use, and to determine which ones are free to use.
 *
 * The logical number supplied by the caller is translated into an actual
 * pde number to be used, and a pointer to it (linear address) is returned
 * for actual use by phys_copy or memset.
 */
static phys_bytes createpde(
	const struct proc *pr,	/* Requested process, NULL for physical. */
	const phys_bytes linaddr,/* Address after segment translation. */
	phys_bytes *bytes,	/* Size of chunk, function may truncate it. */
	int free_pde_idx,	/* index of the free slot to use */
	int *changed		/* If mapping is made, this is set to 1. */
	)
{
	u32_t pdeval;
	phys_bytes offset;
	int pde;

	assert(free_pde_idx >= 0 && free_pde_idx < nfreepdes);
	pde = freepdes[free_pde_idx];
	assert(pde >= 0 && pde < 4096);

	if(pr && ((pr == get_cpulocal_var(ptproc)) || iskernelp(pr))) {
		/* Process memory is requested, and
		 * it's a process that is already in the current page table, or
		 * the kernel, which is always there.
		 * Therefore linaddr is valid directly, with the requested
		 * size.
		 */
		return linaddr;
	}

	if(pr) {
		/* Requested address is in a process that is not currently
		 * accessible directly. Grab the PDE entry of that process'
		 * page table that corresponds to the requested address.
		 */
		assert(pr->p_seg.p_ttbr_v);
		pdeval = pr->p_seg.p_ttbr_v[ARM_VM_PDE(linaddr)];
	} else {
		/* Requested address is physical. Make up the PDE entry. */
		assert(linaddr >= PHYS_MEM_BEGIN && linaddr <= PHYS_MEM_END);
		pdeval = (linaddr & ARM_VM_SECTION_MASK)
			| ARM_VM_SECTION
			| ARM_VM_SECTION_DOMAIN
			| ARM_VM_SECTION_CACHED
			| ARM_VM_SECTION_USER;
	}

	/* Write the pde value that we need into a pde that the kernel
	 * can access, into the currently loaded page table so it becomes
	 * visible.
	 */
	assert(get_cpulocal_var(ptproc)->p_seg.p_ttbr_v);
	if(get_cpulocal_var(ptproc)->p_seg.p_ttbr_v[pde] != pdeval) {
		get_cpulocal_var(ptproc)->p_seg.p_ttbr_v[pde] = pdeval;
		*changed = 1;
	}

	/* Memory is now available, but only in the 1MB window of virtual
	 * address space that we have mapped; calculate how much of
	 * the requested range is visible and return that in *bytes,
	 * if that is less than the requested range.
	 */
	offset = linaddr & ARM_VM_OFFSET_MASK_1MB; /* Offset in 1MB window. */
	*bytes = MIN(*bytes, ARM_SECTION_SIZE - offset);

	/* Return the linear address of the start of the new mapping. */
	return ARM_SECTION_SIZE*pde + offset;
}
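/* Illustrative sketch (not compiled in): how a caller is expected to drive
 * createpde(). It mirrors the pattern used by lin_lin_copy() below; the use
 * of window slots 0 and 1 and the reload on 'changed' are taken from that
 * caller, while the helper name and the phys_copy() call here are
 * assumptions for illustration only.
 */
#if 0
static void example_phys_window_copy(phys_bytes src, phys_bytes dst,
	phys_bytes bytes)
{
	while(bytes > 0) {
		phys_bytes chunk = bytes;
		int changed = 0;

		/* Map both physical ranges into two free 1MB windows;
		 * createpde() may shrink 'chunk' to what fits in one window.
		 */
		phys_bytes s = createpde(NULL, src, &chunk, 0, &changed);
		phys_bytes d = createpde(NULL, dst, &chunk, 1, &changed);
		if(changed)
			reload_ttbr0();	/* page table changed; drop stale TLB entries */

		phys_copy(s, d, chunk);	/* copy within the mapped windows */

		src += chunk;
		dst += chunk;
		bytes -= chunk;
	}
}
#endif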
/*===========================================================================*
 *				check_resumed_caller			     *
 *===========================================================================*/
static int check_resumed_caller(struct proc *caller)
{
	/* Returns the result from VM if caller was resumed, otherwise OK. */
	if (caller && (caller->p_misc_flags & MF_KCALL_RESUME)) {
		assert(caller->p_vmrequest.vmresult != VMSUSPEND);
		return caller->p_vmrequest.vmresult;
	}

	return OK;
}
/*===========================================================================*
 *				lin_lin_copy				     *
 *===========================================================================*/
static int lin_lin_copy(struct proc *srcproc, vir_bytes srclinaddr,
	struct proc *dstproc, vir_bytes dstlinaddr, vir_bytes bytes)
{
	u32_t addr;
	int procslot;

	assert(get_cpulocal_var(ptproc));
	assert(get_cpulocal_var(proc_ptr));
	assert(read_ttbr0() == get_cpulocal_var(ptproc)->p_seg.p_ttbr);

	procslot = get_cpulocal_var(ptproc)->p_nr;

	assert(procslot >= 0 && procslot < ARM_VM_DIR_ENTRIES);

	if(srcproc) assert(!RTS_ISSET(srcproc, RTS_SLOT_FREE));
	if(dstproc) assert(!RTS_ISSET(dstproc, RTS_SLOT_FREE));
	assert(!RTS_ISSET(get_cpulocal_var(ptproc), RTS_SLOT_FREE));
	assert(get_cpulocal_var(ptproc)->p_seg.p_ttbr_v);
	if(srcproc) assert(!RTS_ISSET(srcproc, RTS_VMINHIBIT));
	if(dstproc) assert(!RTS_ISSET(dstproc, RTS_VMINHIBIT));

	while(bytes > 0) {
		phys_bytes srcptr, dstptr;
		vir_bytes chunk = bytes;
		int changed = 0;

#ifdef CONFIG_SMP
		unsigned cpu = cpuid;

		if (srcproc && GET_BIT(srcproc->p_stale_tlb, cpu)) {
			changed = 1;
			UNSET_BIT(srcproc->p_stale_tlb, cpu);
		}
		if (dstproc && GET_BIT(dstproc->p_stale_tlb, cpu)) {
			changed = 1;
			UNSET_BIT(dstproc->p_stale_tlb, cpu);
		}
#endif

		/* Set up 1MB ranges. */
		srcptr = createpde(srcproc, srclinaddr, &chunk, 0, &changed);
		dstptr = createpde(dstproc, dstlinaddr, &chunk, 1, &changed);
		if(changed)
			reload_ttbr0();

		/* Copy bytes. */
		PHYS_COPY_CATCH(srcptr, dstptr, chunk, addr);

		if(addr) {
			/* If addr is nonzero, a page fault was caught.
			 *
			 * phys_copy does all memory accesses word-aligned (rounded
			 * down), so pagefaults can occur at a lower address than
			 * the specified offsets. Compute the lower bounds for the
			 * sanity check.
			 */
			vir_bytes src_aligned = srcptr & ~0x3, dst_aligned = dstptr & ~0x3;

			if(addr >= src_aligned && addr < (srcptr + chunk)) {
				return EFAULT_SRC;
			}
			if(addr >= dst_aligned && addr < (dstptr + chunk)) {
				return EFAULT_DST;
			}

			panic("lin_lin_copy fault out of range");
		}

		/* Update counter and addresses for next iteration, if any. */
		bytes -= chunk;
		srclinaddr += chunk;
		dstlinaddr += chunk;
	}

	if(srcproc) assert(!RTS_ISSET(srcproc, RTS_SLOT_FREE));
	if(dstproc) assert(!RTS_ISSET(dstproc, RTS_SLOT_FREE));
	assert(!RTS_ISSET(get_cpulocal_var(ptproc), RTS_SLOT_FREE));
	assert(get_cpulocal_var(ptproc)->p_seg.p_ttbr_v);

	return OK;
}
static u32_t phys_get32(phys_bytes addr)
{
	u32_t v;
	int r;

	if((r=lin_lin_copy(NULL, addr,
		proc_addr(SYSTEM), (phys_bytes) &v, sizeof(v))) != OK) {
		panic("lin_lin_copy for phys_get32 failed: %d", r);
	}

	return v;
}
/*===========================================================================*
 *				umap_virtual				     *
 *===========================================================================*/
phys_bytes umap_virtual(rp, seg, vir_addr, bytes)
register struct proc *rp;	/* pointer to proc table entry for process */
int seg;			/* T, D, or S segment */
vir_bytes vir_addr;		/* virtual address in bytes within the seg */
vir_bytes bytes;		/* # of bytes to be copied */
{
	phys_bytes phys = 0;

	if(vm_lookup(rp, vir_addr, &phys, NULL) != OK) {
		printf("SYSTEM:umap_virtual: vm_lookup of %s: seg 0x%x: 0x%lx failed\n",
			rp->p_name, seg, vir_addr);
		phys = 0;
	} else {
		if(phys == 0)
			panic("vm_lookup returned phys: 0x%lx", phys);
	}

	if(phys == 0) {
		printf("SYSTEM:umap_virtual: lookup failed\n");
		return 0;
	}

	/* Now make sure addresses are contiguous in physical memory
	 * so that the umap makes sense.
	 */
	if(bytes > 0 && vm_lookup_range(rp, vir_addr, NULL, bytes) != bytes) {
		printf("umap_virtual: %s: %lu at 0x%lx (vir 0x%lx) not contiguous\n",
			rp->p_name, bytes, vir_addr, vir_addr);
		return 0;
	}

	/* phys must be larger than 0 (or the caller will think the call
	 * failed), and address must not cross a page boundary.
	 */
	assert(phys);

	return phys;
}
/*===========================================================================*
 *				vm_lookup				     *
 *===========================================================================*/
int vm_lookup(const struct proc *proc, const vir_bytes virtual,
	phys_bytes *physical, u32_t *ptent)
{
	u32_t *root, *pt;
	int pde, pte;
	u32_t pde_v, pte_v;

	assert(!isemptyp(proc));
	assert(HASPT(proc));

	/* Retrieve page directory entry. */
	root = (u32_t *) (proc->p_seg.p_ttbr & ARM_TTBR_ADDR_MASK);
	assert(!((u32_t) root % ARM_PAGEDIR_SIZE));
	pde = ARM_VM_PDE(virtual);
	assert(pde >= 0 && pde < ARM_VM_DIR_ENTRIES);
	pde_v = phys_get32((u32_t) (root + pde));

	if(!((pde_v & ARM_VM_PDE_PRESENT)
		|| (pde_v & ARM_VM_SECTION_PRESENT))) {
		return EFAULT;
	}

	if(pde_v & ARM_VM_SECTION) {
		*physical = pde_v & ARM_VM_SECTION_MASK;
		if(ptent) *ptent = pde_v;
		*physical += virtual & ARM_VM_OFFSET_MASK_1MB;
	} else {
		/* Retrieve page table entry. */
		pt = (u32_t *) (pde_v & ARM_VM_PDE_MASK);
		assert(!((u32_t) pt % ARM_PAGETABLE_SIZE));
		pte = ARM_VM_PTE(virtual);
		assert(pte >= 0 && pte < ARM_VM_PT_ENTRIES);
		pte_v = phys_get32((u32_t) (pt + pte));
		if(!(pte_v & ARM_VM_PTE_PRESENT)) {
			return EFAULT;
		}

		if(ptent) *ptent = pte_v;

		/* Actual address now known; retrieve it and add page offset. */
		*physical = pte_v & ARM_VM_PTE_MASK;
		*physical += virtual % ARM_PAGE_SIZE;
	}

	return OK;
}
/*===========================================================================*
 *				vm_lookup_range				     *
 *===========================================================================*/
size_t vm_lookup_range(const struct proc *proc, vir_bytes vir_addr,
	phys_bytes *phys_addr, size_t bytes)
{
	/* Look up the physical address corresponding to linear virtual address
	 * 'vir_addr' for process 'proc'. Return the size of the range covered
	 * by contiguous physical memory starting from that address; this may
	 * be anywhere between 0 and 'bytes' inclusive. If the return value is
	 * nonzero, and 'phys_addr' is non-NULL, 'phys_addr' will be set to the
	 * base physical address of the range. 'vir_addr' and 'bytes' need not
	 * be page-aligned, but the caller must have verified that the given
	 * linear range is valid for the given process at all.
	 */
	phys_bytes phys, next_phys;
	size_t len;

	assert(HASPT(proc));

	/* Look up the first page. */
	if (vm_lookup(proc, vir_addr, &phys, NULL) != OK)
		return 0;

	if (phys_addr != NULL)
		*phys_addr = phys;

	len = ARM_PAGE_SIZE - (vir_addr % ARM_PAGE_SIZE);
	vir_addr += len;
	next_phys = phys + len;

	/* Look up any next pages and test physical contiguity. */
	while (len < bytes) {
		if (vm_lookup(proc, vir_addr, &phys, NULL) != OK)
			break;

		if (next_phys != phys)
			break;

		len += ARM_PAGE_SIZE;
		vir_addr += ARM_PAGE_SIZE;
		next_phys += ARM_PAGE_SIZE;
	}

	/* We might now have overshot the requested length somewhat. */
	return MIN(bytes, len);
}
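/* Illustrative sketch (not compiled in): typical use of vm_lookup_range(),
 * modeled on umap_virtual() above. A caller that needs 'bytes' of physically
 * contiguous memory checks that the covered length equals the requested
 * length; 'rp', 'vir_addr' and 'bytes' are hypothetical here.
 */
#if 0
	phys_bytes base;

	if (vm_lookup_range(rp, vir_addr, &base, bytes) != bytes) {
		/* Range is unmapped, or not physically contiguous. */
		return EFAULT;
	}
	/* 'base' now holds the physical address corresponding to vir_addr. */
#endif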
/*===========================================================================*
 *				vm_suspend				     *
 *===========================================================================*/
static void vm_suspend(struct proc *caller, const struct proc *target,
	const vir_bytes linaddr, const vir_bytes len, const int type,
	const int writeflag)
{
	/* This range is not OK for this process. Set parameters
	 * of the request and notify VM about the pending request.
	 */
	assert(!RTS_ISSET(caller, RTS_VMREQUEST));
	assert(!RTS_ISSET(target, RTS_VMREQUEST));

	RTS_SET(caller, RTS_VMREQUEST);

	caller->p_vmrequest.req_type = VMPTYPE_CHECK;
	caller->p_vmrequest.target = target->p_endpoint;
	caller->p_vmrequest.params.check.start = linaddr;
	caller->p_vmrequest.params.check.length = len;
	caller->p_vmrequest.params.check.writeflag = writeflag;
	caller->p_vmrequest.type = type;

	/* Connect caller on vmrequest wait queue. */
	if(!(caller->p_vmrequest.nextrequestor = vmrequest))
		if(OK != send_sig(VM_PROC_NR, SIGKMEM))
			panic("send_sig failed");
	vmrequest = caller;
}
/*===========================================================================*
 *				vm_check_range				     *
 *===========================================================================*/
int vm_check_range(struct proc *caller, struct proc *target,
	vir_bytes vir_addr, size_t bytes, int writeflag)
{
	/* Public interface to vm_suspend(), for use by kernel calls. On behalf
	 * of 'caller', call into VM to check linear virtual address range of
	 * process 'target', starting at 'vir_addr', for 'bytes' bytes. This
	 * function assumes that it will be called twice if VM returned an error
	 * the first time (since nothing has changed in that case), and will
	 * then return the error code resulting from the first call. Upon the
	 * first call, a non-success error code is returned as well.
	 */
	int r;

	if ((caller->p_misc_flags & MF_KCALL_RESUME) &&
			(r = caller->p_vmrequest.vmresult) != OK)
		return r;

	vm_suspend(caller, target, vir_addr, bytes, VMSTYPE_KERNELCALL,
		writeflag);

	return VMSUSPEND;
}
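/* Illustrative sketch (not compiled in): the intended two-phase use of
 * vm_check_range() from a kernel call handler, per the comment above. The
 * handler shape and the names 'caller', 'target', 'addr' and 'len' are
 * assumptions for illustration.
 */
#if 0
	/* First pass: suspends the caller and returns VMSUSPEND while VM
	 * services the request. The kernel call is later re-executed with
	 * MF_KCALL_RESUME set; if VM reported failure, the second pass
	 * returns that error code instead.
	 */
	if ((r = vm_check_range(caller, target, addr, len, 1 /*writeflag*/)) != OK)
		return r;
#endif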
/*===========================================================================*
 *				delivermsg				     *
 *===========================================================================*/
void delivermsg(struct proc *rp)
{
	int r = OK;

	assert(rp->p_misc_flags & MF_DELIVERMSG);
	assert(rp->p_delivermsg.m_source != NONE);

	if (copy_msg_to_user(&rp->p_delivermsg,
			(message *) rp->p_delivermsg_vir)) {
		printf("WARNING wrong user pointer 0x%08lx from "
			"process %s / %d\n",
			rp->p_delivermsg_vir,
			rp->p_name,
			rp->p_endpoint);
		r = EFAULT;
	}

	/* Indicate message has been delivered; address is 'used'. */
	rp->p_delivermsg.m_source = NONE;
	rp->p_misc_flags &= ~MF_DELIVERMSG;

	if(!(rp->p_misc_flags & MF_CONTEXT_SET)) {
		rp->p_reg.retreg = r;
	}
}
/*===========================================================================*
 *				vm_memset				     *
 *===========================================================================*/
int vm_memset(struct proc *caller, endpoint_t who, phys_bytes ph, int c,
	phys_bytes count)
{
	u32_t pattern;
	struct proc *whoptr = NULL;
	phys_bytes cur_ph = ph;
	phys_bytes left = count;
	phys_bytes ptr, chunk, pfa = 0;
	int new_ttbr, r = OK;

	if ((r = check_resumed_caller(caller)) != OK)
		return r;

	/* NONE for physical, otherwise virtual */
	if (who != NONE && !(whoptr = endpoint_lookup(who)))
		return ESRCH;

	c &= 0xFF;
	pattern = c | (c << 8) | (c << 16) | (c << 24);

	assert(get_cpulocal_var(ptproc)->p_seg.p_ttbr_v);
	assert(!catch_pagefaults);
	catch_pagefaults = 1;

	/* We can memset as many bytes as we have remaining,
	 * or as many as remain in the 1MB chunk we mapped in.
	 */
	while (left > 0) {
		new_ttbr = 0;
		chunk = left;
		ptr = createpde(whoptr, cur_ph, &chunk, 0, &new_ttbr);

		if (new_ttbr) {
			reload_ttbr0();
		}

		/* If a page fault happens, pfa is nonzero */
		if ((pfa = phys_memset(ptr, pattern, chunk))) {
			if (whoptr) {
				/* If a process pagefaults, VM may help out */
				vm_suspend(caller, whoptr, ph, count,
					VMSTYPE_KERNELCALL, 1);
				assert(catch_pagefaults);
				catch_pagefaults = 0;
				return VMSUSPEND;
			}

			/* Pagefault when phys copying ?! */
			panic("vm_memset: pf %lx addr=%lx len=%lu\n",
				pfa, ptr, chunk);
		}

		cur_ph += chunk;
		left -= chunk;
	}

	assert(get_cpulocal_var(ptproc)->p_seg.p_ttbr_v);
	assert(catch_pagefaults);
	catch_pagefaults = 0;

	return OK;
}
/*===========================================================================*
 *				virtual_copy_f				     *
 *===========================================================================*/
int virtual_copy_f(caller, src_addr, dst_addr, bytes, vmcheck)
struct proc * caller;
struct vir_addr *src_addr;	/* source virtual address */
struct vir_addr *dst_addr;	/* destination virtual address */
vir_bytes bytes;		/* # of bytes to copy */
int vmcheck;			/* if nonzero, can return VMSUSPEND */
{
	/* Copy bytes from virtual address src_addr to virtual address dst_addr. */
	struct vir_addr *vir_addr[2];	/* virtual source and destination address */
	int i, r;
	struct proc *procs[2];

	assert((vmcheck && caller) || (!vmcheck && !caller));

	/* Check copy count. */
	if (bytes <= 0) return(EDOM);

	/* Do some more checks and map virtual addresses to physical addresses. */
	vir_addr[_SRC_] = src_addr;
	vir_addr[_DST_] = dst_addr;

	for (i=_SRC_; i<=_DST_; i++) {
		endpoint_t proc_e = vir_addr[i]->proc_nr_e;
		int proc_nr;
		struct proc *p;

		if(proc_e == NONE) {
			p = NULL;
		} else {
			if(!isokendpt(proc_e, &proc_nr)) {
				printf("virtual_copy: no reasonable endpoint\n");
				return ESRCH;
			}
			p = proc_addr(proc_nr);
		}

		procs[i] = p;
	}

	if ((r = check_resumed_caller(caller)) != OK)
		return r;

	if((r=lin_lin_copy(procs[_SRC_], vir_addr[_SRC_]->offset,
		procs[_DST_], vir_addr[_DST_]->offset, bytes)) != OK) {
		int writeflag;
		struct proc *target = NULL;
		phys_bytes lin;

		if(r != EFAULT_SRC && r != EFAULT_DST)
			panic("lin_lin_copy failed: %d", r);
		if(!vmcheck || !caller) {
			return r;
		}

		if(r == EFAULT_SRC) {
			lin = vir_addr[_SRC_]->offset;
			target = procs[_SRC_];
			writeflag = 0;
		} else if(r == EFAULT_DST) {
			lin = vir_addr[_DST_]->offset;
			target = procs[_DST_];
			writeflag = 1;
		} else {
			panic("r strange: %d", r);
		}

		assert(caller);
		assert(target);

		vm_suspend(caller, target, lin, bytes, VMSTYPE_KERNELCALL,
			writeflag);
		return VMSUSPEND;
	}

	return OK;
}
/*===========================================================================*
 *				data_copy				     *
 *===========================================================================*/
int data_copy(const endpoint_t from_proc, const vir_bytes from_addr,
	const endpoint_t to_proc, const vir_bytes to_addr,
	vir_bytes bytes)
{
	struct vir_addr src, dst;

	src.offset = from_addr;
	dst.offset = to_addr;
	src.proc_nr_e = from_proc;
	dst.proc_nr_e = to_proc;
	assert(src.proc_nr_e != NONE);
	assert(dst.proc_nr_e != NONE);

	return virtual_copy(&src, &dst, bytes);
}
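/* Illustrative sketch (not compiled in): data_copy() as a kernel-call
 * handler might use it to copy a kernel buffer out to a user process;
 * 'kbuf', 'user_endpt' and 'user_addr' are hypothetical.
 */
#if 0
	if (data_copy(KERNEL, (vir_bytes) &kbuf,
			user_endpt, user_addr, sizeof(kbuf)) != OK)
		panic("copy-out failed");
#endif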
/*===========================================================================*
 *				data_copy_vmcheck			     *
 *===========================================================================*/
int data_copy_vmcheck(struct proc * caller,
	const endpoint_t from_proc, const vir_bytes from_addr,
	const endpoint_t to_proc, const vir_bytes to_addr,
	vir_bytes bytes)
{
	struct vir_addr src, dst;

	src.offset = from_addr;
	dst.offset = to_addr;
	src.proc_nr_e = from_proc;
	dst.proc_nr_e = to_proc;
	assert(src.proc_nr_e != NONE);
	assert(dst.proc_nr_e != NONE);

	return virtual_copy_vmcheck(caller, &src, &dst, bytes);
}
void memory_init(void)
{
	assert(nfreepdes == 0);

	freepdes[nfreepdes++] = kinfo.freepde_start++;
	freepdes[nfreepdes++] = kinfo.freepde_start++;

	assert(kinfo.freepde_start < ARM_VM_DIR_ENTRIES);
	assert(nfreepdes == 2);
	assert(nfreepdes <= MAXFREEPDES);
}
/*===========================================================================*
 *				arch_proc_init				     *
 *===========================================================================*/
void arch_proc_init(struct proc *pr, const u32_t ip, const u32_t sp,
	const u32_t ps_str, char *name)
{
	arch_proc_reset(pr);
	strcpy(pr->p_name, name);

	/* set custom state we know */
	pr->p_reg.pc = ip;
	pr->p_reg.sp = sp;
	pr->p_reg.retreg = ps_str; /* a.k.a. r0 */
}
static int usermapped_glo_index = -1,
	usermapped_index = -1, first_um_idx = -1;

/* defined in kernel.lds */
extern char usermapped_start, usermapped_end, usermapped_nonglo_start;
int arch_phys_map(const int index,
	phys_bytes *addr,
	phys_bytes *len,
	int *flags)
{
	static int first = 1;
	kern_phys_map *phys_maps;

	int freeidx = 0;
	u32_t glo_len = (u32_t) &usermapped_nonglo_start -
		(u32_t) &usermapped_start;

	if(first) {
		memset(&minix_kerninfo, 0, sizeof(minix_kerninfo));
		if(glo_len > 0)
			usermapped_glo_index = freeidx++;

		usermapped_index = freeidx++;
		first_um_idx = usermapped_index;
		if(usermapped_glo_index != -1)
			first_um_idx = usermapped_glo_index;
		first = 0;

		/* walk over the list of requested maps and index them */
		phys_maps = kern_phys_map_head;
		while(phys_maps != NULL){
			phys_maps->index = freeidx++;
			phys_maps = phys_maps->next;
		}
	}

	if(index == usermapped_glo_index) {
		*addr = vir2phys(&usermapped_start);
		*len = glo_len;
		*flags = VMMF_USER | VMMF_GLO;
		return OK;
	}
	else if(index == usermapped_index) {
		*addr = vir2phys(&usermapped_nonglo_start);
		*len = (u32_t) &usermapped_end -
			(u32_t) &usermapped_nonglo_start;
		*flags = VMMF_USER;
		return OK;
	}

	/* if the above fails, loop over the requested maps */
	phys_maps = kern_phys_map_head;
	while(phys_maps != NULL){
		if(phys_maps->index == index){
			*addr = phys_maps->addr;
			*len = phys_maps->size;
			*flags = phys_maps->vm_flags;
			return OK;
		}
		phys_maps = phys_maps->next;
	}

	return EINVAL;
}
int arch_phys_map_reply(const int index, const vir_bytes addr)
{
	kern_phys_map *phys_maps;

	if(index == first_um_idx) {
		u32_t usermapped_offset;
		assert(addr > (u32_t) &usermapped_start);
		usermapped_offset = addr - (u32_t) &usermapped_start;
#define FIXEDPTR(ptr) (void *) ((u32_t)ptr + usermapped_offset)
#define FIXPTR(ptr) ptr = FIXEDPTR(ptr)
#define ASSIGN(minixstruct) minix_kerninfo.minixstruct = FIXEDPTR(&minixstruct)

		/* adjust the pointers of the functions and the struct
		 * itself to the user-accessible mapping
		 */
		minix_kerninfo.kerninfo_magic = KERNINFO_MAGIC;
		minix_kerninfo.minix_feature_flags = minix_feature_flags;
		minix_kerninfo_user = (vir_bytes) FIXEDPTR(&minix_kerninfo);

		return OK;
	}

	if (index == usermapped_index) {
		return OK;
	}

	/* if the above fails, loop over the requested maps */
	phys_maps = kern_phys_map_head;
	while(phys_maps != NULL){
		if(phys_maps->index == index){
			assert(phys_maps->cb != NULL);
			/* only update the virtual address here; the
			 * callback is invoked later, in arch_enable_paging
			 */
			phys_maps->vir = addr;
			return OK;
		}
		phys_maps = phys_maps->next;
	}

	return EINVAL;
}
int arch_enable_paging(struct proc * caller)
{
	kern_phys_map *phys_maps;
	assert(caller->p_seg.p_ttbr);

	/* load caller's page table */
	switch_address_space(caller);

	/* We have now switched address spaces and the mappings are
	 * valid. We can now remap previous mappings. This is not a
	 * good time to do printf, as the initial mapping is gone and
	 * the new mapping is not yet in place.
	 */
	phys_maps = kern_phys_map_head;
	while(phys_maps != NULL){
		assert(phys_maps->cb != NULL);
		phys_maps->cb(phys_maps->id, phys_maps->vir);
		phys_maps = phys_maps->next;
	}

	return OK;
}
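/* Illustrative sketch (not compiled in): the three-phase handshake between
 * VM and the kernel for the kern_phys_map list, tying together the three
 * functions above. The variable names are hypothetical.
 */
#if 0
	/* 1. VM queries each mapping slot until EINVAL: */
	arch_phys_map(idx, &addr, &len, &flags);

	/* 2. VM creates the mapping and reports the chosen virtual address,
	 *    which the kernel stores in phys_maps->vir:
	 */
	arch_phys_map_reply(idx, vaddr);

	/* 3. Paging is switched on; every stored callback is invoked as
	 *    cb(id, vir) so users of the mapping can rewrite their pointers:
	 */
	arch_enable_paging(caller);
#endif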
void release_address_space(struct proc *pr)
{
	pr->p_seg.p_ttbr_v = NULL;
}
/*
 * Request a physical mapping
 */
int kern_req_phys_map(phys_bytes base_address, vir_bytes io_size,
	int vm_flags, kern_phys_map * priv,
	kern_phys_map_mapped cb, vir_bytes id)
{
	/* Assign the values to the given struct and add priv
	   to the list */
	assert(base_address != 0);
	assert(io_size % ARM_PAGE_SIZE == 0);

	priv->addr = base_address;
	priv->size = io_size;
	priv->vm_flags = vm_flags;
	priv->cb = cb;
	priv->id = id;
	priv->index = -1;
	priv->next = NULL;

	if (kern_phys_map_head == NULL){
		/* keep a list of items; this is the first one */
		kern_phys_map_head = priv;
		kern_phys_map_head->next = NULL;
	} else {
		/* insert the item at the head, but first keep track
		   of the current head by putting it in next */
		priv->next = kern_phys_map_head;
		/* replace the head */
		kern_phys_map_head = priv;
	}
	return 0;
}
/*
 * Callback implementation where the id given to the
 * kern_phys_map is a pointer to the io map base address.
 * This implementation will just change that base address
 * once that area is remapped.
 */
int kern_phys_map_mapped_ptr(vir_bytes id, phys_bytes address)
{
	*((vir_bytes *) id) = address;
	return 0;
}
/*
 * Request a physical mapping and put the result in the given ptr.
 * Note that ptr will only be valid once the callback happened.
 */
int kern_phys_map_ptr(
	phys_bytes base_address,
	vir_bytes io_size,
	int vm_flags,
	kern_phys_map * priv,
	vir_bytes ptr)
{
	return kern_req_phys_map(base_address, io_size, vm_flags, priv,
		kern_phys_map_mapped_ptr, ptr);
}
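/* Illustrative sketch (not compiled in): how a kernel-level driver might
 * request a device mapping with kern_phys_map_ptr(). The UART base address,
 * the statics and the flag choice are hypothetical.
 */
#if 0
static kern_phys_map uart_map;			/* storage for the request */
static vir_bytes uart_base = 0x48020000;	/* starts out physical */

static void example_uart_map_init(void)
{
	/* After VM maps the area and paging is enabled,
	 * kern_phys_map_mapped_ptr() overwrites uart_base with the chosen
	 * kernel-virtual address, so later MMIO goes through that pointer.
	 */
	kern_phys_map_ptr(uart_base, ARM_PAGE_SIZE,
		VMMF_UNCACHED | VMMF_WRITE,
		&uart_map, (vir_bytes) &uart_base);
}
#endif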