#include "../../kernel.h"
#include "../../proc.h"

#include <minix/type.h>
#include <minix/syslib.h>
#include <minix/cpufeature.h>
#include <string.h>

#include <sys/vm_i386.h>

#include <minix/portio.h>

#include "../../proto.h"
#include "../../debug.h"

#ifdef CONFIG_WATCHDOG
#include "../../watchdog.h"
#endif
#define PROCPDEPTR(pr, pi) ((u32_t *) ((u8_t *) vm_pagedirs + \
				I386_PAGE_SIZE * pr->p_nr + \
				I386_VM_PT_ENT_SIZE * pi))

PUBLIC u8_t *vm_pagedirs = NULL;

#define PDEMASK(n) (1L << (n))
PUBLIC u32_t dirtypde;	/* Accessed from assembly code. */
#define WANT_FREEPDES (sizeof(dirtypde)*8-5)
PRIVATE int nfreepdes = 0, freepdes[WANT_FREEPDES], inusepde = NOPDE;

#define HASPT(procptr) ((procptr)->p_seg.p_cr3 != 0)
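
/* Illustrative note (editor's sketch, not in the original source):
 * vm_pagedirs is a flat array holding one page directory per process
 * slot, so PROCPDEPTR(pr, pi) addresses entry pi of process pr's
 * directory: p_nr * I386_PAGE_SIZE skips to that process's directory and
 * pi * I386_VM_PT_ENT_SIZE (4 bytes per entry) indexes into it.
 * PDEMASK maps a directory slot to its bit in the dirtypde bitmap,
 * e.g. PDEMASK(3) == 0x8.
 */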
FORWARD _PROTOTYPE( u32_t phys_get32, (phys_bytes v) );
FORWARD _PROTOTYPE( void vm_enable_paging, (void) );
FORWARD _PROTOTYPE( void set_cr3, (void) );
/* *** Internal VM Functions *** */

PUBLIC void vm_init(struct proc *newptproc)
{
	if(vm_running)
		minix_panic("vm_init: vm_running", NO_NUM);
	vm_set_cr3(newptproc);
	level0(vm_enable_paging);
	vm_running = 1;
}
/* This macro sets up a mapping from within the kernel's address
 * space to any other area of memory, either straight physical
 * memory (PROC == NULL) or a process view of memory, in 4MB chunks.
 * It recognizes PROC having kernel address space as a special case.
 *
 * It sets PTR to the pointer within kernel address space at the start
 * of the 4MB chunk, and OFFSET to the offset within that chunk
 * that corresponds to LINADDR.
 *
 * It needs FREEPDE (available and addressable PDE within kernel
 * address space), SEG (hardware segment), VIRT (in-datasegment
 * virtual address of FREEPDE).
 */
#define CREATEPDE(PROC, PTR, LINADDR, REMAIN, BYTES, PDE, TYPE) { \
	u32_t *pdeptr = NULL; \
	int proc_pde_index; \
	proc_pde_index = I386_VM_PDE(LINADDR); \
	PDE = NOPDE; \
	if((PROC) && (((PROC) == ptproc) || !HASPT(PROC))) { \
		/* Process owns the current page table, or has none; \
		 * use the linear address as-is. */ \
		PTR = LINADDR; \
	} else { \
		int fp; \
		int mustinvl; \
		u32_t pdeval, *pdevalptr, mask; \
		phys_bytes offset; \
		if(PROC) { \
			vmassert(!iskernelp(PROC)); \
			vmassert(HASPT(PROC)); \
			pdeptr = PROCPDEPTR(PROC, proc_pde_index); \
			pdeval = *pdeptr; \
		} else { \
			/* Straight physical memory: fabricate a 4MB entry. */ \
			pdeval = (LINADDR & I386_VM_ADDR_MASK_4MB) | \
				I386_VM_BIGPAGE | I386_VM_PRESENT | \
				I386_VM_WRITE | I386_VM_USER; \
		} \
		/* Find a free kernel PDE slot, preferring a clean one. */ \
		for(fp = 0; fp < nfreepdes; fp++) { \
			int k = freepdes[fp]; \
			if(inusepde == k) \
				continue; \
			*PROCPDEPTR(ptproc, k) = 0; \
			PDE = k; \
			vmassert(k >= 0); \
			vmassert(k < sizeof(dirtypde)*8); \
			mask = PDEMASK(PDE); \
			if(dirtypde & mask) \
				continue; \
			break; \
		} \
		vmassert(PDE != NOPDE); \
		vmassert(mask); \
		if(dirtypde & mask) { \
			mustinvl = 1; \
		} else { \
			mustinvl = 0; \
		} \
		inusepde = PDE; \
		*PROCPDEPTR(ptproc, PDE) = pdeval; \
		offset = LINADDR & I386_VM_OFFSET_MASK_4MB; \
		PTR = I386_BIG_PAGE_SIZE*PDE + offset; \
		REMAIN = MIN(REMAIN, I386_BIG_PAGE_SIZE - offset); \
		if(1 || mustinvl) { \
			level0(reload_cr3); \
		} \
	} \
}
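
/* Worked example (illustrative): for LINADDR = 0x12345678,
 * I386_VM_PDE(LINADDR) is 0x48 (0x12345678 >> 22), the mapped 4MB frame
 * starts at 0x12000000 (LINADDR & I386_VM_ADDR_MASK_4MB), and the offset
 * is 0x345678 (LINADDR & I386_VM_OFFSET_MASK_4MB). If free slot k is
 * chosen as PDE, PTR becomes I386_BIG_PAGE_SIZE*k + 0x345678 and REMAIN
 * is clipped so the access never runs past the end of that 4MB window.
 */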
#define DONEPDE(PDE) { \
	if(PDE != NOPDE) { \
		vmassert(PDE < sizeof(dirtypde)*8); \
		dirtypde |= PDEMASK(PDE); \
	} \
}

#define WIPEPDE(PDE) { \
	if(PDE != NOPDE) { \
		vmassert(PDE < sizeof(dirtypde)*8); \
		*PROCPDEPTR(ptproc, PDE) = 0; \
	} \
}
/*===========================================================================*
 *				lin_lin_copy				     *
 *===========================================================================*/
PRIVATE int lin_lin_copy(struct proc *srcproc, vir_bytes srclinaddr,
	struct proc *dstproc, vir_bytes dstlinaddr, vir_bytes bytes)
{
	u32_t addr;
	int procslot;
	NOREC_ENTER(linlincopy);

	vmassert(vm_running);
	vmassert(nfreepdes >= 3);

	vmassert(ptproc);
	vmassert(read_cr3() == ptproc->p_seg.p_cr3);

	procslot = ptproc->p_nr;

	vmassert(procslot >= 0 && procslot < I386_VM_DIR_ENTRIES);

	while(bytes > 0) {
		phys_bytes srcptr, dstptr;
		vir_bytes chunk = bytes;
		int srcpde, dstpde;
		int srctype, dsttype;

		/* Set up 4MB ranges. */
		inusepde = NOPDE;
		CREATEPDE(srcproc, srcptr, srclinaddr, chunk, bytes, srcpde, srctype);
		CREATEPDE(dstproc, dstptr, dstlinaddr, chunk, bytes, dstpde, dsttype);

		/* Copy pages. */
		PHYS_COPY_CATCH(srcptr, dstptr, chunk, addr);

		DONEPDE(srcpde);
		DONEPDE(dstpde);

		if(addr) {
			/* If addr is nonzero, a page fault was caught. */

			if(addr >= srcptr && addr < (srcptr + chunk)) {
				NOREC_RETURN(linlincopy, EFAULT_SRC);
			}
			if(addr >= dstptr && addr < (dstptr + chunk)) {
				NOREC_RETURN(linlincopy, EFAULT_DST);
			}

			minix_panic("lin_lin_copy fault out of range", NO_NUM);

			/* Not reached. */
			NOREC_RETURN(linlincopy, EFAULT);
		}

		/* Update counter and addresses for next iteration, if any. */
		bytes -= chunk;
		srclinaddr += chunk;
		dstlinaddr += chunk;
	}

	NOREC_RETURN(linlincopy, OK);
}
/*===========================================================================*
 *				phys_get32				     *
 *===========================================================================*/
PRIVATE u32_t phys_get32(phys_bytes addr)
{
	u32_t v;
	int r;

	if(!vm_running) {
		phys_copy(addr, vir2phys(&v), sizeof(v));
		return v;
	}

	if((r=lin_lin_copy(NULL, addr,
		proc_addr(SYSTEM), vir2phys(&v), sizeof(v))) != OK) {
		minix_panic("lin_lin_copy for phys_get32 failed", r);
	}

	return v;
}
PRIVATE u32_t vm_cr3;	/* temp arg to level0() func */

PRIVATE void set_cr3()
{
	write_cr3(vm_cr3);
}

PUBLIC void vm_set_cr3(struct proc *newptproc)
{
	int u = 0;
	if(!intr_disabled()) { lock; u = 1; }
	vm_cr3 = newptproc->p_seg.p_cr3;
	if(vm_cr3) {
		level0(set_cr3);
		ptproc = newptproc;
	}
	if(u) { unlock; }
}
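
/* Design note (illustrative): level0() runs a parameterless function at
 * the most privileged CPU level, so set_cr3() takes its argument through
 * the file-static vm_cr3 rather than through a parameter; vm_set_cr3()
 * stores the new page table base there first, then traps to set_cr3().
 */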
PRIVATE char *cr0_str(u32_t e)
{
	static char str[80];
	strcpy(str, "");
#define FLAG(v) do { if(e & (v)) { strcat(str, #v " "); e &= ~v; } } while(0)
	FLAG(I386_CR0_PE);
	FLAG(I386_CR0_MP);
	FLAG(I386_CR0_EM);
	FLAG(I386_CR0_TS);
	FLAG(I386_CR0_ET);
	FLAG(I386_CR0_PG);
	FLAG(I386_CR0_WP);
	if(e) { strcat(str, " (++)"); }
	return str;
}

PRIVATE char *cr4_str(u32_t e)
{
	static char str[80];
	strcpy(str, "");
	FLAG(I386_CR4_VME);
	FLAG(I386_CR4_PVI);
	FLAG(I386_CR4_TSD);
	FLAG(I386_CR4_DE);
	FLAG(I386_CR4_PSE);
	FLAG(I386_CR4_PAE);
	FLAG(I386_CR4_MCE);
	FLAG(I386_CR4_PGE);
	if(e) { strcat(str, " (++)"); }
	return str;
}
PRIVATE void vm_enable_paging(void)
{
	u32_t cr0, cr4;
	int psok, pgeok;

	psok = _cpufeature(_CPUF_I386_PSE);
	pgeok = _cpufeature(_CPUF_I386_PGE);

	cr0 = read_cr0();
	cr4 = read_cr4();

	/* First clear PG and PGE flag, as PGE must be enabled after PG. */
	write_cr0(cr0 & ~I386_CR0_PG);
	write_cr4(cr4 & ~(I386_CR4_PGE | I386_CR4_PSE));

	cr0 = read_cr0();
	cr4 = read_cr4();

	/* Our first page table contains 4MB entries. */
	if(psok)
		cr4 |= I386_CR4_PSE;
	write_cr4(cr4);

	/* First enable paging, then enable global page flag. */
	cr0 |= I386_CR0_PG;
	write_cr0(cr0);

	/* May we enable these features? */
	if(pgeok)
		cr4 |= I386_CR4_PGE;
	write_cr4(cr4);
}
PUBLIC vir_bytes alloc_remote_segment(u32_t *selector,
	segframe_t *segments, int index, phys_bytes phys, vir_bytes size,
	int priv)
{
	phys_bytes offset = 0;
	/* Check if the segment size can be recorded in bytes, that is, check
	 * if the descriptor's limit field can delimit the allowed memory
	 * region precisely. This works up to 1MB. If the size is larger,
	 * 4K pages instead of bytes are used.
	 */
	if (size < BYTE_GRAN_MAX) {
		init_dataseg(&segments->p_ldt[EXTRA_LDT_INDEX+index],
			phys, size, priv);
		*selector = ((EXTRA_LDT_INDEX+index)*0x08) | (1*0x04) | priv;
		offset = 0;
	} else {
		init_dataseg(&segments->p_ldt[EXTRA_LDT_INDEX+index],
			phys & ~0xFFFF, 0, priv);
		*selector = ((EXTRA_LDT_INDEX+index)*0x08) | (1*0x04) | priv;
		offset = phys & 0xFFFF;
	}

	return offset;
}
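
/* Worked example (illustrative): the selector encodes the descriptor
 * index times 8, with bit 2 (0x04) set to select the LDT and the low two
 * bits as the privilege level. With EXTRA_LDT_INDEX+index == 5 and
 * priv == 3, the selector is 5*0x08 | 0x04 | 3 == 0x2F.
 */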
PUBLIC phys_bytes umap_remote(struct proc* rp, int seg,
	vir_bytes vir_addr, vir_bytes bytes)
{
/* Calculate the physical memory address for a given virtual address. */
  struct far_mem *fm;

  if(rp->p_misc_flags & MF_FULLVM) return 0;

  if (bytes <= 0) return( (phys_bytes) 0);
  if (seg < 0 || seg >= NR_REMOTE_SEGS) return( (phys_bytes) 0);

  fm = &rp->p_priv->s_farmem[seg];
  if (! fm->in_use) return( (phys_bytes) 0);
  if (vir_addr + bytes > fm->mem_len) return( (phys_bytes) 0);

  return(fm->mem_phys + (phys_bytes) vir_addr);
}
/*===========================================================================*
 *				umap_local				     *
 *===========================================================================*/
PUBLIC phys_bytes umap_local(rp, seg, vir_addr, bytes)
register struct proc *rp;	/* pointer to proc table entry for process */
int seg;			/* T, D, or S segment */
vir_bytes vir_addr;		/* virtual address in bytes within the seg */
vir_bytes bytes;		/* # of bytes to be copied */
{
/* Calculate the physical memory address for a given virtual address. */
  vir_clicks vc;		/* the virtual address in clicks */
  phys_bytes pa;		/* intermediate variables as phys_bytes */
  phys_bytes seg_base;

  if(seg != T && seg != D && seg != S)
	minix_panic("umap_local: wrong seg", seg);

  if (bytes <= 0) return( (phys_bytes) 0);
  if (vir_addr + bytes <= vir_addr) return 0;	/* overflow */
  vc = (vir_addr + bytes - 1) >> CLICK_SHIFT;	/* last click of data */

  if (seg != T)
	seg = (vc < rp->p_memmap[D].mem_vir + rp->p_memmap[D].mem_len ? D : S);
  else if (rp->p_memmap[T].mem_len == 0)	/* common I&D? */
	seg = D;				/* ptrace needs this */

  if ((vir_addr>>CLICK_SHIFT) >= rp->p_memmap[seg].mem_vir +
	rp->p_memmap[seg].mem_len) return( (phys_bytes) 0 );

  if (vc >= rp->p_memmap[seg].mem_vir +
	rp->p_memmap[seg].mem_len) return( (phys_bytes) 0 );

  seg_base = (phys_bytes) rp->p_memmap[seg].mem_phys;
  seg_base = seg_base << CLICK_SHIFT;	/* segment origin in bytes */
  pa = (phys_bytes) vir_addr;
  pa -= rp->p_memmap[seg].mem_vir << CLICK_SHIFT;
  return(seg_base + pa);
}
/*===========================================================================*
 *				umap_virtual				     *
 *===========================================================================*/
PUBLIC phys_bytes umap_virtual(rp, seg, vir_addr, bytes)
register struct proc *rp;	/* pointer to proc table entry for process */
int seg;			/* T, D, or S segment */
vir_bytes vir_addr;		/* virtual address in bytes within the seg */
vir_bytes bytes;		/* # of bytes to be copied */
{
	vir_bytes linear, phys = 0;

	if(seg == MEM_GRANT) {
		return umap_grant(rp, vir_addr, bytes);
	}

	if(!(linear = umap_local(rp, seg, vir_addr, bytes))) {
		kprintf("SYSTEM:umap_virtual: umap_local failed\n");
		phys = 0;
	} else {
		if(vm_lookup(rp, linear, &phys, NULL) != OK) {
			kprintf("SYSTEM:umap_virtual: vm_lookup of %s: seg 0x%lx: 0x%lx failed\n",
				rp->p_name, seg, vir_addr);
			phys = 0;
		} else if(phys == 0) {
			minix_panic("vm_lookup returned phys", phys);
		}
	}

	if(phys == 0) {
		kprintf("SYSTEM:umap_virtual: lookup failed\n");
		return 0;
	}

	/* Now make sure addresses are contiguous in physical memory
	 * so that the umap makes sense.
	 */
	if(bytes > 0 && !vm_contiguous(rp, linear, bytes)) {
		kprintf("umap_virtual: %s: %d at 0x%lx (vir 0x%lx) not contiguous\n",
			rp->p_name, bytes, linear, vir_addr);
		return 0;
	}

	/* phys must be larger than 0 (or the caller will think the call
	 * failed), and address must not cross a page boundary.
	 */
	vmassert(phys);

	return phys;
}
/*===========================================================================*
 *				vm_lookup				     *
 *===========================================================================*/
PUBLIC int vm_lookup(struct proc *proc, vir_bytes virtual,
	vir_bytes *physical, u32_t *ptent)
{
	u32_t *root, *pt;
	int pde, pte;
	u32_t pde_v, pte_v;
	NOREC_ENTER(vmlookup);

	vmassert(proc);
	vmassert(physical);
	vmassert(!isemptyp(proc));

	if(!HASPT(proc)) {
		*physical = virtual;
		NOREC_RETURN(vmlookup, OK);
	}

	/* Retrieve page directory entry. */
	root = (u32_t *) proc->p_seg.p_cr3;
	vmassert(!((u32_t) root % I386_PAGE_SIZE));
	pde = I386_VM_PDE(virtual);
	vmassert(pde >= 0 && pde < I386_VM_DIR_ENTRIES);
	pde_v = phys_get32((u32_t) (root + pde));

	if(!(pde_v & I386_VM_PRESENT)) {
		NOREC_RETURN(vmlookup, EFAULT);
	}

	/* We don't expect to ever see this. */
	if(pde_v & I386_VM_BIGPAGE) {
		*physical = pde_v & I386_VM_ADDR_MASK_4MB;
		if(ptent) *ptent = pde_v;
		*physical += virtual & I386_VM_OFFSET_MASK_4MB;
	} else {
		/* Retrieve page table entry. */
		pt = (u32_t *) I386_VM_PFA(pde_v);
		vmassert(!((u32_t) pt % I386_PAGE_SIZE));
		pte = I386_VM_PTE(virtual);
		vmassert(pte >= 0 && pte < I386_VM_PT_ENTRIES);
		pte_v = phys_get32((u32_t) (pt + pte));
		if(!(pte_v & I386_VM_PRESENT)) {
			NOREC_RETURN(vmlookup, EFAULT);
		}

		if(ptent) *ptent = pte_v;

		/* Actual address now known; retrieve it and add page offset. */
		*physical = I386_VM_PFA(pte_v);
		*physical += virtual % I386_PAGE_SIZE;
	}

	NOREC_RETURN(vmlookup, OK);
}
/*===========================================================================*
 *				vm_contiguous				     *
 *===========================================================================*/
PUBLIC int vm_contiguous(struct proc *targetproc, u32_t vir_buf, size_t bytes)
{
	int first = 1, r, boundaries = 0;
	vir_bytes prev_phys = 0;
	u32_t po;

	vmassert(targetproc);
	vmassert(bytes > 0);

	if(!HASPT(targetproc))
		return 1;

	/* Start and end at page boundary to make logic simpler. */
	po = vir_buf % I386_PAGE_SIZE;
	if(po > 0) {
		vir_buf -= po;
		bytes += po;
	}
	po = (vir_buf + bytes) % I386_PAGE_SIZE;
	if(po > 0)
		bytes += I386_PAGE_SIZE - po;

	/* Keep going as long as we cross a page boundary. */
	while(bytes > 0) {
		vir_bytes phys;

		if((r=vm_lookup(targetproc, vir_buf, &phys, NULL)) != OK) {
			kprintf("vm_contiguous: vm_lookup failed, %d\n", r);
			kprintf("kernel stack: ");
			util_stacktrace();
			return 0;
		}

		if(!first) {
			if(prev_phys+I386_PAGE_SIZE != phys) {
				kprintf("vm_contiguous: no (0x%lx, 0x%lx)\n",
					prev_phys, phys);
				kprintf("kernel stack: ");
				util_stacktrace();
				return 0;
			}
		}

		first = 0;

		prev_phys = phys;
		vir_buf += I386_PAGE_SIZE;
		bytes -= I386_PAGE_SIZE;
		boundaries++;
	}

	return 1;
}
/*===========================================================================*
 *				vm_suspend				     *
 *===========================================================================*/
PRIVATE void vm_suspend(struct proc *caller, struct proc *target,
	vir_bytes linaddr, vir_bytes len, int wrflag, int type)
{
	/* This range is not OK for this process. Set parameters
	 * of the request and notify VM about the pending request.
	 */
	vmassert(!RTS_ISSET(caller, RTS_VMREQUEST));
	vmassert(!RTS_ISSET(target, RTS_VMREQUEST));

	RTS_LOCK_SET(caller, RTS_VMREQUEST);

	caller->p_vmrequest.stacktrace[0] = '\0';
	util_stacktrace_strcat(caller->p_vmrequest.stacktrace);

	caller->p_vmrequest.req_type = VMPTYPE_CHECK;
	caller->p_vmrequest.target = target->p_endpoint;
	caller->p_vmrequest.params.check.start = linaddr;
	caller->p_vmrequest.params.check.length = len;
	caller->p_vmrequest.params.check.writeflag = 1;
	caller->p_vmrequest.type = type;

	/* Connect caller on vmrequest wait queue. */
	if(!(caller->p_vmrequest.nextrequestor = vmrequest))
		mini_notify(proc_addr(SYSTEM), VM_PROC_NR);
	vmrequest = caller;
}
/*===========================================================================*
 *				delivermsg				     *
 *===========================================================================*/
int delivermsg(struct proc *rp)
{
	phys_bytes addr;
	int r = OK;
	NOREC_ENTER(deliver);

	vmassert(rp->p_misc_flags & MF_DELIVERMSG);
	vmassert(rp->p_delivermsg.m_source != NONE);

	vmassert(rp->p_delivermsg_lin);
	if(rp->p_delivermsg_lin !=
		umap_local(rp, D, rp->p_delivermsg_vir, sizeof(message))) {
		printf("vir: 0x%lx lin was: 0x%lx umap now: 0x%lx\n",
			rp->p_delivermsg_vir, rp->p_delivermsg_lin,
			umap_local(rp, D, rp->p_delivermsg_vir, sizeof(message)));
		minix_panic("that's wrong", NO_NUM);
	}

	PHYS_COPY_CATCH(vir2phys(&rp->p_delivermsg),
		rp->p_delivermsg_lin, sizeof(message), addr);

	if(addr) {
		vm_suspend(rp, rp, rp->p_delivermsg_lin, sizeof(message), 1,
			VMSTYPE_DELIVERMSG);
		r = VMSUSPEND;
	} else {
		/* Message delivered; it is no longer up to VM. */
		rp->p_delivermsg.m_source = NONE;
		rp->p_delivermsg_lin = 0;

		rp->p_misc_flags &= ~MF_DELIVERMSG;
	}

	NOREC_RETURN(deliver, r);
}
PRIVATE char *flagstr(u32_t e, int dir)
{
	static char str[80];
	strcpy(str, "");
	FLAG(I386_VM_PRESENT);
	FLAG(I386_VM_WRITE);
	FLAG(I386_VM_USER);
	FLAG(I386_VM_PWT);
	FLAG(I386_VM_PCD);
	FLAG(I386_VM_GLOBAL);
	if(dir)
		FLAG(I386_VM_BIGPAGE);	/* Page directory entry only */
	else
		FLAG(I386_VM_DIRTY);	/* Page table entry only */
	return str;
}

PRIVATE void vm_pt_print(u32_t *pagetable, u32_t v)
{
	int pte;
	int col = 0;

	vmassert(!((u32_t) pagetable % I386_PAGE_SIZE));

	for(pte = 0; pte < I386_VM_PT_ENTRIES; pte++) {
		u32_t pte_v, pfa;
		pte_v = phys_get32((u32_t) (pagetable + pte));
		if(!(pte_v & I386_VM_PRESENT))
			continue;
		pfa = I386_VM_PFA(pte_v);
		kprintf("%4d:%08lx:%08lx %2s ",
			pte, v + I386_PAGE_SIZE*pte, pfa,
			(pte_v & I386_VM_WRITE) ? "rw":"RO");
		col++;
		if(col == 3) { kprintf("\n"); col = 0; }
	}
	if(col > 0) kprintf("\n");

	return;
}

PRIVATE void vm_print(u32_t *root)
{
	int pde;

	vmassert(!((u32_t) root % I386_PAGE_SIZE));

	printf("page table 0x%lx:\n", root);

	for(pde = 0; pde < I386_VM_DIR_ENTRIES; pde++) {
		u32_t pde_v;
		u32_t *pte_a;
		pde_v = phys_get32((u32_t) (root + pde));
		if(!(pde_v & I386_VM_PRESENT))
			continue;
		if(pde_v & I386_VM_BIGPAGE) {
			kprintf("%4d: 0x%lx, flags %s\n",
				pde, I386_VM_PFA(pde_v), flagstr(pde_v, 1));
		} else {
			pte_a = (u32_t *) I386_VM_PFA(pde_v);
			kprintf("%4d: pt %08lx %s\n",
				pde, pte_a, flagstr(pde_v, 1));
			vm_pt_print(pte_a, pde * I386_VM_PT_ENTRIES * I386_PAGE_SIZE);
			kprintf("\n");
		}
	}

	return;
}
/*===========================================================================*
 *				vm_phys_memset				     *
 *===========================================================================*/
int vm_phys_memset(phys_bytes ph, u8_t c, phys_bytes bytes)
{
	u32_t p;
	NOREC_ENTER(physmemset);

	p = c | (c << 8) | (c << 16) | (c << 24);

	if(!vm_running) {
		phys_memset(ph, p, bytes);
		NOREC_RETURN(physmemset, OK);
	}

	vmassert(nfreepdes >= 3);

	/* With VM, we have to map in the physical memory.
	 * We can do this 4MB at a time.
	 */
	while(bytes > 0) {
		int pde, t;
		vir_bytes chunk = (vir_bytes) bytes;
		phys_bytes ptr;
		inusepde = NOPDE;
		CREATEPDE(((struct proc *) NULL), ptr, ph, chunk, bytes, pde, t);
		/* We can memset as many bytes as we have remaining,
		 * or as many as remain in the 4MB chunk we mapped in.
		 */
		phys_memset(ptr, p, chunk);
		DONEPDE(pde);
		bytes -= chunk;
		ph += chunk;
	}

	NOREC_RETURN(physmemset, OK);
}
/*===========================================================================*
 *				virtual_copy_f				     *
 *===========================================================================*/
PUBLIC int virtual_copy_f(caller, src_addr, dst_addr, bytes, vmcheck)
struct proc * caller;
struct vir_addr *src_addr;	/* source virtual address */
struct vir_addr *dst_addr;	/* destination virtual address */
vir_bytes bytes;		/* # of bytes to copy */
int vmcheck;			/* if nonzero, can return VMSUSPEND */
{
/* Copy bytes from virtual address src_addr to virtual address dst_addr.
 * Virtual addresses can be in ABS, LOCAL_SEG, REMOTE_SEG, or BIOS_SEG.
 */
  struct vir_addr *vir_addr[2];	/* virtual source and destination address */
  phys_bytes phys_addr[2];	/* absolute source and destination */
  int seg_index;
  int i, r;
  struct proc *procs[2];
  NOREC_ENTER(virtualcopy);

  vmassert((vmcheck && caller) || (!vmcheck && !caller));

  /* Check copy count. */
  if (bytes <= 0) return(EDOM);

  /* Do some more checks and map virtual addresses to physical addresses. */
  vir_addr[_SRC_] = src_addr;
  vir_addr[_DST_] = dst_addr;

  for (i=_SRC_; i<=_DST_; i++) {
	int proc_nr, type;
	struct proc *p;

	type = vir_addr[i]->segment & SEGMENT_TYPE;
	if((type != PHYS_SEG && type != BIOS_SEG) &&
	   isokendpt(vir_addr[i]->proc_nr_e, &proc_nr))
		p = proc_addr(proc_nr);
	else
		p = NULL;

	procs[i] = p;

	/* Get physical address. */
	switch(type) {
	case LOCAL_SEG:
	case LOCAL_VM_SEG:
	    if(!p) {
		NOREC_RETURN(virtualcopy, EDEADSRCDST);
	    }
	    seg_index = vir_addr[i]->segment & SEGMENT_INDEX;
	    if(type == LOCAL_SEG)
		phys_addr[i] = umap_local(p, seg_index, vir_addr[i]->offset,
			bytes);
	    else
		phys_addr[i] = umap_virtual(p, seg_index,
			vir_addr[i]->offset, bytes);
	    if(phys_addr[i] == 0) {
		kprintf("virtual_copy: map 0x%x failed for %s seg %d, "
			"offset %lx, len %d, i %d\n",
			type, p->p_name, seg_index, vir_addr[i]->offset,
			bytes, i);
	    }
	    break;
	case REMOTE_SEG:
	    if(!p) {
		NOREC_RETURN(virtualcopy, EDEADSRCDST);
	    }
	    seg_index = vir_addr[i]->segment & SEGMENT_INDEX;
	    phys_addr[i] = umap_remote(p, seg_index, vir_addr[i]->offset, bytes);
	    break;
#if _MINIX_CHIP == _CHIP_INTEL
	case BIOS_SEG:
	    phys_addr[i] = umap_bios(vir_addr[i]->offset, bytes);
	    break;
#endif
	case PHYS_SEG:
	    phys_addr[i] = vir_addr[i]->offset;
	    break;
	default:
	    kprintf("virtual_copy: strange type 0x%x\n", type);
	    NOREC_RETURN(virtualcopy, EINVAL);
	}

	/* Check if mapping succeeded. */
	if (phys_addr[i] <= 0 && vir_addr[i]->segment != PHYS_SEG) {
	    kprintf("virtual_copy EFAULT\n");
	    NOREC_RETURN(virtualcopy, EFAULT);
	}
  }

  if(vm_running) {
	if(caller && RTS_ISSET(caller, RTS_VMREQUEST)) {
		vmassert(caller->p_vmrequest.vmresult != VMSUSPEND);
		RTS_LOCK_UNSET(caller, RTS_VMREQUEST);
		if(caller->p_vmrequest.vmresult != OK) {
			printf("virtual_copy: returning VM error %d\n",
				caller->p_vmrequest.vmresult);
			NOREC_RETURN(virtualcopy, caller->p_vmrequest.vmresult);
		}
	}

	if((r=lin_lin_copy(procs[_SRC_], phys_addr[_SRC_],
		procs[_DST_], phys_addr[_DST_], bytes)) != OK) {
		struct proc *target;
		phys_bytes lin;
		int wr;

		if(r != EFAULT_SRC && r != EFAULT_DST)
			minix_panic("lin_lin_copy failed", r);
		if(!vmcheck || !caller) {
			NOREC_RETURN(virtualcopy, r);
		}

		vmassert(procs[_SRC_] && procs[_DST_]);

		if(r == EFAULT_SRC) {
			lin = phys_addr[_SRC_];
			target = procs[_SRC_];
			wr = 0;
		} else if(r == EFAULT_DST) {
			lin = phys_addr[_DST_];
			target = procs[_DST_];
			wr = 1;
		} else {
			minix_panic("r strange", r);
		}

		printf("virtual_copy: suspending caller %d / %s, target %d / %s\n",
			caller->p_endpoint, caller->p_name,
			target->p_endpoint, target->p_name);

		vmassert(proc_ptr->p_endpoint == SYSTEM);
		vm_suspend(caller, target, lin, bytes, wr,
			VMSTYPE_KERNELCALL);
		NOREC_RETURN(virtualcopy, VMSUSPEND);
	}

	NOREC_RETURN(virtualcopy, OK);
  }

  vmassert(!vm_running);

  /* can't copy to/from process with PT without VM */
#define NOPT(p) (!(p) || !HASPT(p))
  if(!NOPT(procs[_SRC_])) {
	kprintf("ignoring page table src: %s / %d at 0x%lx\n",
		procs[_SRC_]->p_name, procs[_SRC_]->p_endpoint,
		procs[_SRC_]->p_seg.p_cr3);
  }
  if(!NOPT(procs[_DST_])) {
	kprintf("ignoring page table dst: %s / %d at 0x%lx\n",
		procs[_DST_]->p_name, procs[_DST_]->p_endpoint,
		procs[_DST_]->p_seg.p_cr3);
  }

  /* Now copy bytes between physical addresses. */
  if(phys_copy(phys_addr[_SRC_], phys_addr[_DST_], (phys_bytes) bytes))
	NOREC_RETURN(virtualcopy, EFAULT);

  NOREC_RETURN(virtualcopy, OK);
}
/*===========================================================================*
 *				data_copy				     *
 *===========================================================================*/
PUBLIC int data_copy(endpoint_t from_proc, vir_bytes from_addr,
	endpoint_t to_proc, vir_bytes to_addr,
	size_t bytes)
{
  struct vir_addr src, dst;

  src.segment = dst.segment = D;
  src.offset = from_addr;
  dst.offset = to_addr;
  src.proc_nr_e = from_proc;
  dst.proc_nr_e = to_proc;

  return virtual_copy(&src, &dst, bytes);
}

/*===========================================================================*
 *				data_copy_vmcheck			     *
 *===========================================================================*/
PUBLIC int data_copy_vmcheck(struct proc * caller,
	endpoint_t from_proc, vir_bytes from_addr,
	endpoint_t to_proc, vir_bytes to_addr,
	size_t bytes)
{
  struct vir_addr src, dst;

  src.segment = dst.segment = D;
  src.offset = from_addr;
  dst.offset = to_addr;
  src.proc_nr_e = from_proc;
  dst.proc_nr_e = to_proc;

  return virtual_copy_vmcheck(caller, &src, &dst, bytes);
}
/*===========================================================================*
 *				arch_pre_exec				     *
 *===========================================================================*/
PUBLIC void arch_pre_exec(struct proc *pr, u32_t ip, u32_t sp)
{
/* wipe extra LDT entries, set program counter, and stack pointer. */
	memset(pr->p_seg.p_ldt + EXTRA_LDT_INDEX, 0,
		sizeof(pr->p_seg.p_ldt[0]) * (LDT_SIZE - EXTRA_LDT_INDEX));
	pr->p_reg.pc = ip;
	pr->p_reg.sp = sp;
}
/*===========================================================================*
 *				arch_umap				     *
 *===========================================================================*/
PUBLIC int arch_umap(struct proc *pr, vir_bytes offset, vir_bytes count,
	int seg, phys_bytes *addr)
{
	switch(seg) {
		case BIOS_SEG:
			*addr = umap_bios(offset, count);
			return OK;
	}

	/* This must be EINVAL; the umap fallback function in
	 * lib/syslib/alloc_util.c depends on it to detect an
	 * older kernel (as opposed to a mapping error).
	 */
	return EINVAL;
}
/* VM reports page directory slot we're allowed to use freely. */
void i386_freepde(int pde)
{
	if(nfreepdes >= WANT_FREEPDES)
		return;
	freepdes[nfreepdes++] = pde;
}
PUBLIC int arch_phys_map(int index, phys_bytes *addr, phys_bytes *len,
	int *flags)
{
	/* map the local APIC if enabled */
	if (index == 0 && lapic_addr) {
		*addr = vir2phys(lapic_addr);
		*len = 4 << 10 /* 4kB */;
		*flags = VMMF_UNCACHED;
		return OK;
	}

	/* we don't want anything */
	return EINVAL;
}

PUBLIC int arch_phys_map_reply(int index, vir_bytes addr)
{
	/* if local APIC is enabled */
	if (index == 0 && lapic_addr) {
		lapic_addr_vaddr = addr;
	}
	return OK;
}

PUBLIC int arch_enable_paging(void)
{
	/* if local APIC is enabled */
	if (lapic_addr) {
		lapic_addr = lapic_addr_vaddr;
		lapic_eoi_addr = LAPIC_EOI;
	}

#ifdef CONFIG_WATCHDOG
	/*
	 * We make sure that we don't enable the watchdog until paging is
	 * turned on, as we might get an NMI while switching and we might
	 * still use the wrong lapic address. Bad things would happen. It is
	 * unfortunate, but such is life.
	 */
	level0(i386_watchdog_start);
#endif

	return OK;
}