#include "../../kernel.h"
#include "../../proc.h"

#include <minix/type.h>
#include <minix/syslib.h>
#include <minix/cpufeature.h>

#include <string.h>

#include <sys/vm_i386.h>

#include "../../proto.h"
#include "../../debug.h"

#ifdef CONFIG_WATCHDOG
#include "../../watchdog.h"
#endif
#define PROCPDEPTR(pr, pi) ((u32_t *) ((u8_t *) vm_pagedirs +	\
				I386_PAGE_SIZE * pr->p_nr +	\
				I386_VM_PT_ENT_SIZE * pi))
PUBLIC u8_t *vm_pagedirs = NULL;
#define PDEMASK(n) (1L << (n))
PUBLIC u32_t dirtypde;		/* Accessed from assembly code. */
/* PDE slots that VM hands the kernel for temporary mappings; each slot
 * has a corresponding dirty bit in dirtypde.
 */
#define WANT_FREEPDES (sizeof(dirtypde)*8-5)
PRIVATE int nfreepdes = 0, freepdes[WANT_FREEPDES], inusepde = NOPDE;
#define HASPT(procptr) ((procptr)->p_seg.p_cr3 != 0)
FORWARD _PROTOTYPE( u32_t phys_get32, (phys_bytes v)		);
FORWARD _PROTOTYPE( void vm_enable_paging, (void)		);
/* *** Internal VM Functions *** */
PUBLIC void vm_init(struct proc *newptproc)
{
	if(vm_running)
		minix_panic("vm_init: vm_running", NO_NUM);
	switch_address_space(newptproc);
	vmassert(ptproc == newptproc);
	vm_enable_paging();
	vm_running = 1;
}
/* This macro sets up a mapping from within the kernel's address
 * space to any other area of memory, either straight physical
 * memory (PROC == NULL) or a process view of memory, in 4MB chunks.
 * It recognizes PROC having kernel address space as a special case.
 *
 * It sets PTR to the pointer within kernel address space at the start
 * of the 4MB chunk, and OFFSET to the offset within that chunk
 * that corresponds to LINADDR.
 *
 * It needs FREEPDE (available and addressable PDE within kernel
 * address space), SEG (hardware segment), VIRT (in-datasegment
 * copy of FREEPDE).
 */
#define CREATEPDE(PROC, PTR, LINADDR, REMAIN, BYTES, PDE, TYPE) {	\
	u32_t *pdeptr = NULL, pdeval, mask;				\
	int fp, proc_pde_index, mustinvl;				\
	phys_bytes offset;						\
	proc_pde_index = I386_VM_PDE(LINADDR);				\
	PDE = NOPDE;							\
	if((PROC) && (((PROC) == ptproc) || !HASPT(PROC))) {		\
		PTR = LINADDR;	/* Addressable as-is: use directly. */	\
		REMAIN = MIN(REMAIN, BYTES);				\
	} else {							\
		if(PROC) {	/* Borrow the process' own mapping. */	\
			vmassert(!iskernelp(PROC));			\
			vmassert(HASPT(PROC));				\
			pdeptr = PROCPDEPTR(PROC, proc_pde_index);	\
			pdeval = *pdeptr;				\
		} else {	/* Physical memory: big-page entry. */	\
			pdeval = (LINADDR & I386_VM_ADDR_MASK_4MB) |	\
				I386_VM_BIGPAGE | I386_VM_PRESENT |	\
				I386_VM_WRITE | I386_VM_USER;		\
		}							\
		for(fp = 0; fp < nfreepdes; fp++) {			\
			int k = freepdes[fp];				\
			if(inusepde == k) continue;			\
			*PROCPDEPTR(ptproc, k) = 0;			\
			PDE = k;					\
			vmassert(k < sizeof(dirtypde)*8);		\
			mask = PDEMASK(PDE);				\
			if(dirtypde & mask) continue;			\
			break;						\
		}							\
		vmassert(PDE != NOPDE);					\
		if(dirtypde & mask) {	/* Dirty slot: must flush. */	\
			mustinvl = 1;					\
		} else {						\
			mustinvl = 0;					\
		}							\
		inusepde = PDE;						\
		*PROCPDEPTR(ptproc, PDE) = pdeval;			\
		offset = LINADDR & I386_VM_OFFSET_MASK_4MB;		\
		PTR = I386_BIG_PAGE_SIZE*PDE + offset;			\
		REMAIN = MIN(REMAIN, I386_BIG_PAGE_SIZE - offset);	\
		if(1 || mustinvl) {					\
			level0(reload_cr3);				\
		}							\
	}								\
}
#define DONEPDE(PDE)	{				\
	if(PDE != NOPDE) {				\
		vmassert(PDE > 0);			\
		vmassert(PDE < sizeof(dirtypde)*8);	\
		dirtypde |= PDEMASK(PDE);		\
	}						\
}
#define WIPEPDE(PDE)	{				\
	if(PDE != NOPDE) {				\
		vmassert(PDE > 0);			\
		vmassert(PDE < sizeof(dirtypde)*8);	\
		*PROCPDEPTR(ptproc, PDE) = 0;		\
	}						\
}
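/* Illustrative sketch (editor's addition, compiled out): the typical life
 * cycle of the three macros above, as lin_lin_copy() and vm_phys_memset()
 * below use them. 'physaddr' and 'bytes' are hypothetical variables.
 */
#if 0
	while(bytes > 0) {
		phys_bytes ptr;
		vir_bytes chunk = bytes;
		int pde, type;

		inusepde = NOPDE;
		/* Map up to 4MB of physical memory (PROC == NULL). */
		CREATEPDE(((struct proc *) NULL), ptr, physaddr, chunk, bytes, pde, type);
		/* ... access at most 'chunk' bytes through 'ptr' ... */
		DONEPDE(pde);	/* Mark the borrowed PDE slot dirty. */
		physaddr += chunk;
		bytes -= chunk;
	}
#endif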
/*===========================================================================*
 *				lin_lin_copy				     *
 *===========================================================================*/
PRIVATE int lin_lin_copy(struct proc *srcproc, vir_bytes srclinaddr,
	struct proc *dstproc, vir_bytes dstlinaddr, vir_bytes bytes)
{
	u32_t addr;
	int procslot;
	NOREC_ENTER(linlincopy);

	vmassert(vm_running);
	vmassert(nfreepdes >= 3);

	vmassert(getcr3val() == ptproc->p_seg.p_cr3);

	procslot = ptproc->p_nr;

	vmassert(procslot >= 0 && procslot < I386_VM_DIR_ENTRIES);

	while(bytes > 0) {
		phys_bytes srcptr, dstptr;
		vir_bytes chunk = bytes;
		int srcpde, dstpde;
		int srctype, dsttype;

		/* Set up 4MB ranges. */
		inusepde = NOPDE;
		CREATEPDE(srcproc, srcptr, srclinaddr, chunk, bytes, srcpde, srctype);
		CREATEPDE(dstproc, dstptr, dstlinaddr, chunk, bytes, dstpde, dsttype);

		/* Copy pages. */
		PHYS_COPY_CATCH(srcptr, dstptr, chunk, addr);

		DONEPDE(srcpde);
		DONEPDE(dstpde);

		if(addr) {
			/* If addr is nonzero, a page fault was caught. */
			if(addr >= srcptr && addr < (srcptr + chunk)) {
				WIPEPDE(srcpde);
				WIPEPDE(dstpde);
				NOREC_RETURN(linlincopy, EFAULT_SRC);
			}
			if(addr >= dstptr && addr < (dstptr + chunk)) {
				WIPEPDE(srcpde);
				WIPEPDE(dstpde);
				NOREC_RETURN(linlincopy, EFAULT_DST);
			}
			minix_panic("lin_lin_copy fault out of range", NO_NUM);

			/* Not reached. */
			NOREC_RETURN(linlincopy, EFAULT);
		}

		WIPEPDE(srcpde);
		WIPEPDE(dstpde);

		/* Update counter and addresses for next iteration, if any. */
		bytes -= chunk;
		srclinaddr += chunk;
		dstlinaddr += chunk;
	}

	NOREC_RETURN(linlincopy, OK);
}
PRIVATE u32_t phys_get32(phys_bytes addr)
{
	u32_t v;
	int r;

	if(!vm_running) {
		phys_copy(addr, vir2phys(&v), sizeof(v));
		return v;
	}

	if((r=lin_lin_copy(NULL, addr,
		proc_addr(SYSTEM), vir2phys(&v), sizeof(v))) != OK) {
		minix_panic("lin_lin_copy for phys_get32 failed", r);
	}

	return v;
}
PRIVATE char *cr0_str(u32_t e)
{
	static char str[80];
	strcpy(str, "");
#define FLAG(v) do { if(e & (v)) { strcat(str, #v " "); e &= ~v; } } while(0)
	FLAG(I386_CR0_PE);
	FLAG(I386_CR0_MP);
	FLAG(I386_CR0_EM);
	FLAG(I386_CR0_TS);
	FLAG(I386_CR0_ET);
	FLAG(I386_CR0_PG);
	FLAG(I386_CR0_WP);
	if(e) { strcat(str, " (++)"); }
	return str;
}
PRIVATE char *cr4_str(u32_t e)
{
	static char str[80];
	strcpy(str, "");
	FLAG(I386_CR4_VME);
	FLAG(I386_CR4_PVI);
	FLAG(I386_CR4_TSD);
	FLAG(I386_CR4_DE);
	FLAG(I386_CR4_PSE);
	FLAG(I386_CR4_PAE);
	FLAG(I386_CR4_MCE);
	FLAG(I386_CR4_PGE);
	if(e) { strcat(str, " (++)"); }
	return str;
}
PRIVATE void vm_enable_paging(void)
{
	u32_t cr0, cr4;
	int pgeok;

	psok = _cpufeature(_CPUF_I386_PSE);
	pgeok = _cpufeature(_CPUF_I386_PGE);

	cr0 = read_cr0();
	cr4 = read_cr4();

	/* First clear PG and PGE flag, as PGE must be enabled after PG. */
	write_cr0(cr0 & ~I386_CR0_PG);
	write_cr4(cr4 & ~(I386_CR4_PGE | I386_CR4_PSE));

	cr0 = read_cr0();
	cr4 = read_cr4();

	/* Our first page table contains 4MB entries. */
	if(psok)
		cr4 |= I386_CR4_PSE;
	write_cr4(cr4);

	/* First enable paging, then enable global page flag. */
	cr0 |= I386_CR0_PG;
	write_cr0(cr0);

	/* May we enable these features? */
	if(pgeok)
		cr4 |= I386_CR4_PGE;
	write_cr4(cr4);
}
PUBLIC vir_bytes alloc_remote_segment(u32_t *selector,
	segframe_t *segments, int index, phys_bytes phys, vir_bytes size,
	int priv)
{
	phys_bytes offset = 0;
	/* Check if the segment size can be recorded in bytes, that is, check
	 * if the descriptor's limit field can delimit the allowed memory
	 * region precisely. This works up to 1MB. If the size is larger,
	 * 4K pages instead of bytes are used.
	 */
	if (size < BYTE_GRAN_MAX) {
		init_dataseg(&segments->p_ldt[EXTRA_LDT_INDEX+index],
			phys, size, priv);
		*selector = ((EXTRA_LDT_INDEX+index)*0x08) | (1*0x04) | priv;
		offset = 0;
	} else {
		init_dataseg(&segments->p_ldt[EXTRA_LDT_INDEX+index],
			phys & ~0xFFFF, 0, priv);
		*selector = ((EXTRA_LDT_INDEX+index)*0x08) | (1*0x04) | priv;
		offset = phys & 0xFFFF;
	}

	return offset;
}
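/* Editor's note on the selector arithmetic above: an x86 selector is
 * (descriptor index << 3) | TI | RPL, so (EXTRA_LDT_INDEX+index)*0x08
 * shifts the index into place, 1*0x04 sets TI to pick the LDT rather
 * than the GDT, and priv supplies the requested privilege level.
 */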
PUBLIC phys_bytes umap_remote(struct proc* rp, int seg,
	vir_bytes vir_addr, vir_bytes bytes)
{
/* Calculate the physical memory address for a given virtual address. */
  struct far_mem *fm;

  if(rp->p_misc_flags & MF_FULLVM) return 0;

  if (bytes <= 0) return( (phys_bytes) 0);
  if (seg < 0 || seg >= NR_REMOTE_SEGS) return( (phys_bytes) 0);

  fm = &rp->p_priv->s_farmem[seg];
  if (! fm->in_use) return( (phys_bytes) 0);
  if (vir_addr + bytes > fm->mem_len) return( (phys_bytes) 0);

  return(fm->mem_phys + (phys_bytes) vir_addr);
}
/*===========================================================================*
 *				umap_local				     *
 *===========================================================================*/
PUBLIC phys_bytes umap_local(rp, seg, vir_addr, bytes)
register struct proc *rp;	/* pointer to proc table entry for process */
int seg;			/* T, D, or S segment */
vir_bytes vir_addr;		/* virtual address in bytes within the seg */
vir_bytes bytes;		/* # of bytes to be copied */
{
/* Calculate the physical memory address for a given virtual address. */
  vir_clicks vc;		/* the virtual address in clicks */
  phys_bytes pa;		/* intermediate variables as phys_bytes */
  phys_bytes seg_base;
  if(seg != T && seg != D && seg != S)
	minix_panic("umap_local: wrong seg", seg);
  if (bytes <= 0) return( (phys_bytes) 0);
  if (vir_addr + bytes <= vir_addr) return 0;	/* overflow */
  vc = (vir_addr + bytes - 1) >> CLICK_SHIFT;	/* last click of data */
  if (seg != T)
	seg = (vc < rp->p_memmap[D].mem_vir + rp->p_memmap[D].mem_len ? D : S);
  else if (rp->p_memmap[T].mem_len == 0)	/* common I&D? */
	seg = D;				/* ptrace needs this */
  if ((vir_addr>>CLICK_SHIFT) >= rp->p_memmap[seg].mem_vir +
	rp->p_memmap[seg].mem_len) return( (phys_bytes) 0 );
  if (vc >= rp->p_memmap[seg].mem_vir +
	rp->p_memmap[seg].mem_len) return( (phys_bytes) 0 );
  seg_base = (phys_bytes) rp->p_memmap[seg].mem_phys;
  seg_base = seg_base << CLICK_SHIFT;	/* segment origin in bytes */
  pa = (phys_bytes) vir_addr;
  pa -= rp->p_memmap[seg].mem_vir << CLICK_SHIFT;
  return(seg_base + pa);
}
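/* Editor's sketch with hypothetical numbers: with CLICK_SHIFT == 12,
 * mem_phys == 0x100 and mem_vir == 0 (both in clicks), vir_addr 0x2345
 * yields (0x100 << 12) + 0x2345 - 0 = 0x102345.
 */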
/*===========================================================================*
 *				umap_virtual				     *
 *===========================================================================*/
PUBLIC phys_bytes umap_virtual(rp, seg, vir_addr, bytes)
register struct proc *rp;	/* pointer to proc table entry for process */
int seg;			/* T, D, or S segment */
vir_bytes vir_addr;		/* virtual address in bytes within the seg */
vir_bytes bytes;		/* # of bytes to be copied */
{
	vir_bytes linear;
	u32_t phys = 0;

	if(seg == MEM_GRANT) {
		return umap_grant(rp, (cp_grant_id_t) vir_addr, bytes);
	} else {
		if(!(linear = umap_local(rp, seg, vir_addr, bytes))) {
			printf("SYSTEM:umap_virtual: umap_local failed\n");
			phys = 0;
		} else {
			if(vm_lookup(rp, linear, &phys, NULL) != OK) {
				printf("SYSTEM:umap_virtual: vm_lookup of %s: seg 0x%lx: 0x%lx failed\n", rp->p_name, seg, vir_addr);
				phys = 0;
			}
			if(phys == 0)
				minix_panic("vm_lookup returned phys", phys);
		}
	}

	if(phys == 0) {
		printf("SYSTEM:umap_virtual: lookup failed\n");
		return 0;
	}
	/* Now make sure addresses are contiguous in physical memory
	 * so that the umap makes sense.
	 */
	if(bytes > 0 && !vm_contiguous(rp, linear, bytes)) {
		printf("umap_virtual: %s: %d at 0x%lx (vir 0x%lx) not contiguous\n",
			rp->p_name, bytes, linear, vir_addr);
		return 0;
	}

	/* phys must be larger than 0 (or the caller will think the call
	 * failed), and address must not cross a page boundary.
	 */
	vmassert(phys);

	return phys;
}
/*===========================================================================*
 *				vm_lookup				     *
 *===========================================================================*/
PUBLIC int vm_lookup(struct proc *proc, vir_bytes virtual,
	vir_bytes *physical, u32_t *ptent)
{
	u32_t *root, *pt;
	int pde, pte;
	u32_t pde_v, pte_v;
	NOREC_ENTER(vmlookup);

	vmassert(proc);
	vmassert(physical);
	vmassert(!isemptyp(proc));

	if(!HASPT(proc)) {
		*physical = virtual;
		NOREC_RETURN(vmlookup, OK);
	}
	/* Retrieve page directory entry. */
	root = (u32_t *) proc->p_seg.p_cr3;
	vmassert(!((u32_t) root % I386_PAGE_SIZE));
	pde = I386_VM_PDE(virtual);
	vmassert(pde >= 0 && pde < I386_VM_DIR_ENTRIES);
	pde_v = phys_get32((u32_t) (root + pde));

	if(!(pde_v & I386_VM_PRESENT)) {
		NOREC_RETURN(vmlookup, EFAULT);
	}
	/* We don't expect to ever see this. */
	if(pde_v & I386_VM_BIGPAGE) {
		*physical = pde_v & I386_VM_ADDR_MASK_4MB;
		if(ptent) *ptent = pde_v;
		*physical += virtual & I386_VM_OFFSET_MASK_4MB;
	} else {
		/* Retrieve page table entry. */
		pt = (u32_t *) I386_VM_PFA(pde_v);
		vmassert(!((u32_t) pt % I386_PAGE_SIZE));
		pte = I386_VM_PTE(virtual);
		vmassert(pte >= 0 && pte < I386_VM_PT_ENTRIES);
		pte_v = phys_get32((u32_t) (pt + pte));
		if(!(pte_v & I386_VM_PRESENT)) {
			NOREC_RETURN(vmlookup, EFAULT);
		}

		if(ptent) *ptent = pte_v;
		/* Actual address now known; retrieve it and add page offset. */
		*physical = I386_VM_PFA(pte_v);
		*physical += virtual % I386_PAGE_SIZE;
	}

	NOREC_RETURN(vmlookup, OK);
}
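/* Editor's sketch (compiled out): how a 32-bit linear address splits up
 * for the two-level walk above. The top 10 bits index the page directory,
 * the next 10 the page table, and the low 12 are the in-page offset.
 */
#if 0
	vir_bytes v = 0x08048123;
	int pde = I386_VM_PDE(v);		/* v >> 22 == 32 */
	int pte = I386_VM_PTE(v);		/* (v >> 12) & 0x3FF == 72 */
	u32_t off = v % I386_PAGE_SIZE;		/* v & 0xFFF == 0x123 */
#endif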
/*===========================================================================*
 *				vm_contiguous				     *
 *===========================================================================*/
PUBLIC int vm_contiguous(struct proc *targetproc, u32_t vir_buf, size_t bytes)
{
	int first = 1, r;
	u32_t prev_phys = 0;	/* Keep lints happy. */
	u32_t po;

	vmassert(targetproc);
	vmassert(bytes > 0);

	if(!HASPT(targetproc))
		return 1;
	/* Start and end at page boundary to make logic simpler. */
	po = vir_buf % I386_PAGE_SIZE;
	if(po > 0) {
		vir_buf -= po;
		bytes += po;
	}
	po = (vir_buf + bytes) % I386_PAGE_SIZE;
	if(po > 0)
		bytes += I386_PAGE_SIZE - po;
	/* Keep going as long as we cross a page boundary. */
	while(bytes > 0) {
		u32_t phys;

		if((r=vm_lookup(targetproc, vir_buf, &phys, NULL)) != OK) {
			printf("vm_contiguous: vm_lookup failed, %d\n", r);
			printf("kernel stack: ");
			util_stacktrace();
			return 0;
		}

		if(!first) {
			if(prev_phys+I386_PAGE_SIZE != phys) {
				printf("vm_contiguous: no (0x%lx, 0x%lx)\n",
					prev_phys, phys);
				printf("kernel stack: ");
				util_stacktrace();
				return 0;
			}
		}
		first = 0;

		prev_phys = phys;
		vir_buf += I386_PAGE_SIZE;
		bytes -= I386_PAGE_SIZE;
	}

	return 1;
}
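/* Editor's sketch with hypothetical numbers: a buffer at virtual 0x1000
 * with bytes == 0x2800 is first rounded out to the page range
 * [0x1000, 0x4000); the loop above then checks pages 0x1000, 0x2000 and
 * 0x3000 and returns 1 only if their frames are physically consecutive.
 */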
/*===========================================================================*
 *				vm_suspend				     *
 *===========================================================================*/
PRIVATE void vm_suspend(struct proc *caller, struct proc *target,
	vir_bytes linaddr, vir_bytes len, int type)
{
	/* This range is not OK for this process. Set parameters
	 * of the request and notify VM about the pending request.
	 */
	vmassert(!RTS_ISSET(caller, RTS_VMREQUEST));
	vmassert(!RTS_ISSET(target, RTS_VMREQUEST));
	RTS_SET(caller, RTS_VMREQUEST);
	caller->p_vmrequest.stacktrace[0] = '\0';
	util_stacktrace_strcat(caller->p_vmrequest.stacktrace);
	caller->p_vmrequest.req_type = VMPTYPE_CHECK;
	caller->p_vmrequest.target = target->p_endpoint;
	caller->p_vmrequest.params.check.start = linaddr;
	caller->p_vmrequest.params.check.length = len;
	caller->p_vmrequest.params.check.writeflag = 1;
	caller->p_vmrequest.type = type;
	/* Connect caller on vmrequest wait queue; notify VM only if the
	 * queue was empty before, as VM works through the whole chain.
	 */
	if(!(caller->p_vmrequest.nextrequestor = vmrequest))
		mini_notify(proc_addr(SYSTEM), VM_PROC_NR);
	vmrequest = caller;
}
/*===========================================================================*
 *				delivermsg				     *
 *===========================================================================*/
int delivermsg(struct proc *rp)
{
	phys_bytes addr;
	int r = OK;
	NOREC_ENTER(deliver);
	vmassert(rp->p_misc_flags & MF_DELIVERMSG);
	vmassert(rp->p_delivermsg.m_source != NONE);
	vmassert(rp->p_delivermsg_lin);
	if(rp->p_delivermsg_lin !=
		umap_local(rp, D, rp->p_delivermsg_vir, sizeof(message))) {
		printf("vir: 0x%lx lin was: 0x%lx umap now: 0x%lx\n",
			rp->p_delivermsg_vir, rp->p_delivermsg_lin,
			umap_local(rp, D, rp->p_delivermsg_vir, sizeof(message)));
		minix_panic("that's wrong", NO_NUM);
	}
	PHYS_COPY_CATCH(vir2phys(&rp->p_delivermsg),
		rp->p_delivermsg_lin, sizeof(message), addr);
	if(addr) {
		vm_suspend(rp, rp, rp->p_delivermsg_lin, sizeof(message),
			VMSTYPE_DELIVERMSG);
		r = VMSUSPEND;
	} else {
		/* Copy succeeded; mark the message as delivered. */
		rp->p_delivermsg.m_source = NONE;
		rp->p_delivermsg_lin = 0;

		rp->p_misc_flags &= ~MF_DELIVERMSG;
	}

	NOREC_RETURN(deliver, r);
}
PRIVATE char *flagstr(u32_t e, int dir)
{
	static char str[80];
	strcpy(str, "");
	FLAG(I386_VM_PRESENT);
	FLAG(I386_VM_WRITE);
	FLAG(I386_VM_USER);
	FLAG(I386_VM_PWT);
	FLAG(I386_VM_PCD);
	FLAG(I386_VM_GLOBAL);
	if(dir)
		FLAG(I386_VM_BIGPAGE);	/* Page directory entry only */
	else
		FLAG(I386_VM_DIRTY);	/* Page table entry only */
	return str;
}
PRIVATE void vm_pt_print(u32_t *pagetable, u32_t v)
{
	int pte, col = 0;
	u32_t pte_v, pfa;

	vmassert(!((u32_t) pagetable % I386_PAGE_SIZE));
	for(pte = 0; pte < I386_VM_PT_ENTRIES; pte++) {
		pte_v = phys_get32((u32_t) (pagetable + pte));
		if(!(pte_v & I386_VM_PRESENT))
			continue;
		pfa = I386_VM_PFA(pte_v);
		printf("%4d:%08lx:%08lx %2s ",
			pte, v + I386_PAGE_SIZE*pte, pfa,
			(pte_v & I386_VM_WRITE) ? "rw":"RO");
		col++;
		if(col == 3) { printf("\n"); col = 0; }
	}
	if(col > 0) printf("\n");
}
PRIVATE void vm_print(u32_t *root)
{
	int pde;

	vmassert(!((u32_t) root % I386_PAGE_SIZE));

	printf("page table 0x%lx:\n", root);
	for(pde = 0; pde < I386_VM_DIR_ENTRIES; pde++) {
		u32_t pde_v;
		u32_t *pte_a;

		pde_v = phys_get32((u32_t) (root + pde));
		if(!(pde_v & I386_VM_PRESENT))
			continue;
		if(pde_v & I386_VM_BIGPAGE) {
			printf("%4d: 0x%lx, flags %s\n",
				pde, I386_VM_PFA(pde_v), flagstr(pde_v, 1));
		} else {
			pte_a = (u32_t *) I386_VM_PFA(pde_v);
			printf("%4d: pt %08lx %s\n",
				pde, pte_a, flagstr(pde_v, 1));
			vm_pt_print(pte_a, pde * I386_VM_PT_ENTRIES * I386_PAGE_SIZE);
		}
	}
}
/*===========================================================================*
 *				vm_phys_memset				     *
 *===========================================================================*/
int vm_phys_memset(phys_bytes ph, u8_t c, phys_bytes bytes)
{
	u32_t p;
	NOREC_ENTER(physmemset);
	/* Replicate the byte into all four bytes of a word. */
	p = c | (c << 8) | (c << 16) | (c << 24);

	if(!vm_running) {
		phys_memset(ph, p, bytes);
		NOREC_RETURN(physmemset, OK);
	}
	vmassert(nfreepdes >= 3);
	/* With VM, we have to map in the physical memory.
	 * We can do this 4MB at a time.
	 */
	while(bytes > 0) {
		int pde, t;
		vir_bytes chunk = (vir_bytes) bytes;
		phys_bytes ptr;

		inusepde = NOPDE;
		CREATEPDE(((struct proc *) NULL), ptr, ph, chunk, bytes, pde, t);
		/* We can memset as many bytes as we have remaining,
		 * or as many as remain in the 4MB chunk we mapped in.
		 */
		phys_memset(ptr, p, chunk);
		DONEPDE(pde);
		bytes -= chunk;
		ph += chunk;
	}

	NOREC_RETURN(physmemset, OK);
}
/*===========================================================================*
 *				virtual_copy_f				     *
 *===========================================================================*/
PUBLIC int virtual_copy_f(caller, src_addr, dst_addr, bytes, vmcheck)
struct proc * caller;
struct vir_addr *src_addr;	/* source virtual address */
struct vir_addr *dst_addr;	/* destination virtual address */
vir_bytes bytes;		/* # of bytes to copy */
int vmcheck;			/* if nonzero, can return VMSUSPEND */
{
/* Copy bytes from virtual address src_addr to virtual address dst_addr.
 * Virtual addresses can be in ABS, LOCAL_SEG, REMOTE_SEG, or BIOS_SEG.
 */
  struct vir_addr *vir_addr[2];	/* virtual source and destination address */
  phys_bytes phys_addr[2];	/* absolute source and destination */
  int seg_index;
  int i;
  struct proc *procs[2];
  NOREC_ENTER(virtualcopy);
  vmassert((vmcheck && caller) || (!vmcheck && !caller));
  /* Check copy count. */
  if (bytes <= 0) return(EDOM);
  /* Do some more checks and map virtual addresses to physical addresses. */
  vir_addr[_SRC_] = src_addr;
  vir_addr[_DST_] = dst_addr;
  for (i=_SRC_; i<=_DST_; i++) {
	int proc_nr, type;
	struct proc *p;

	type = vir_addr[i]->segment & SEGMENT_TYPE;
	if((type != PHYS_SEG && type != BIOS_SEG) &&
	   isokendpt(vir_addr[i]->proc_nr_e, &proc_nr))
		p = proc_addr(proc_nr);
	else
		p = NULL;

	procs[i] = p;
	/* Get physical address. */
	switch(type) {
	case LOCAL_SEG:
	case LOCAL_VM_SEG:
		if(!p) {
			NOREC_RETURN(virtualcopy, EDEADSRCDST);
		}
		seg_index = vir_addr[i]->segment & SEGMENT_INDEX;
		if(type == LOCAL_SEG)
			phys_addr[i] = umap_local(p, seg_index, vir_addr[i]->offset,
				bytes);
		else
			phys_addr[i] = umap_virtual(p, seg_index,
				vir_addr[i]->offset, bytes);
		if(phys_addr[i] == 0) {
			printf("virtual_copy: map 0x%x failed for %s seg %d, "
				"offset %lx, len %d, i %d\n",
				type, p->p_name, seg_index, vir_addr[i]->offset,
				bytes, i);
		}
		break;
	case REMOTE_SEG:
		if(!p) {
			NOREC_RETURN(virtualcopy, EDEADSRCDST);
		}
		seg_index = vir_addr[i]->segment & SEGMENT_INDEX;
		phys_addr[i] = umap_remote(p, seg_index, vir_addr[i]->offset, bytes);
		break;
#if _MINIX_CHIP == _CHIP_INTEL
	case BIOS_SEG:
		phys_addr[i] = umap_bios(vir_addr[i]->offset, bytes);
		break;
#endif
	case PHYS_SEG:
		phys_addr[i] = vir_addr[i]->offset;
		break;
	default:
		printf("virtual_copy: strange type 0x%x\n", type);
		NOREC_RETURN(virtualcopy, EINVAL);
	}
	/* Check if mapping succeeded. */
	if (phys_addr[i] <= 0 && vir_addr[i]->segment != PHYS_SEG) {
		printf("virtual_copy EFAULT\n");
		NOREC_RETURN(virtualcopy, EFAULT);
	}
  }
  if(vm_running) {
	int r;
	struct proc *target;
	vir_bytes lin;

	if(caller && RTS_ISSET(caller, RTS_VMREQUEST)) {
		vmassert(caller->p_vmrequest.vmresult != VMSUSPEND);
		RTS_UNSET(caller, RTS_VMREQUEST);
		if(caller->p_vmrequest.vmresult != OK) {
			printf("virtual_copy: returning VM error %d\n",
				caller->p_vmrequest.vmresult);
			NOREC_RETURN(virtualcopy, caller->p_vmrequest.vmresult);
		}
	}
	if((r=lin_lin_copy(procs[_SRC_], phys_addr[_SRC_],
		procs[_DST_], phys_addr[_DST_], bytes)) != OK) {
		if(r != EFAULT_SRC && r != EFAULT_DST)
			minix_panic("lin_lin_copy failed", r);
		if(!vmcheck || !caller) {
			NOREC_RETURN(virtualcopy, r);
		}

		vmassert(procs[_SRC_] && procs[_DST_]);
		if(r == EFAULT_SRC) {
			lin = phys_addr[_SRC_];
			target = procs[_SRC_];
		} else if(r == EFAULT_DST) {
			lin = phys_addr[_DST_];
			target = procs[_DST_];
		} else {
			minix_panic("r strange", r);
		}
		printf("virtual_copy: suspending caller %d / %s, target %d / %s\n",
			caller->p_endpoint, caller->p_name,
			target->p_endpoint, target->p_name);

		vmassert(proc_ptr->p_endpoint == SYSTEM);
		vm_suspend(caller, target, lin, bytes,
			VMSTYPE_KERNELCALL);
		NOREC_RETURN(virtualcopy, VMSUSPEND);
	}
	NOREC_RETURN(virtualcopy, OK);
  }
  vmassert(!vm_running);
  /* Can't copy to/from a process with a page table without VM. */
#define NOPT(p) (!(p) || !HASPT(p))
  if(!NOPT(procs[_SRC_])) {
	printf("ignoring page table src: %s / %d at 0x%lx\n",
		procs[_SRC_]->p_name, procs[_SRC_]->p_endpoint,
		procs[_SRC_]->p_seg.p_cr3);
  }
  if(!NOPT(procs[_DST_])) {
	printf("ignoring page table dst: %s / %d at 0x%lx\n",
		procs[_DST_]->p_name, procs[_DST_]->p_endpoint,
		procs[_DST_]->p_seg.p_cr3);
  }
  /* Now copy bytes between physical addresses. */
  if(phys_copy(phys_addr[_SRC_], phys_addr[_DST_], (phys_bytes) bytes))
	NOREC_RETURN(virtualcopy, EFAULT);

  NOREC_RETURN(virtualcopy, OK);
}
/*===========================================================================*
 *				data_copy				     *
 *===========================================================================*/
PUBLIC int data_copy(endpoint_t from_proc, vir_bytes from_addr,
	endpoint_t to_proc, vir_bytes to_addr,
	size_t bytes)
{
	struct vir_addr src, dst;
	src.segment = dst.segment = D;
	src.offset = from_addr;
	dst.offset = to_addr;
	src.proc_nr_e = from_proc;
	dst.proc_nr_e = to_proc;

	return virtual_copy(&src, &dst, bytes);
}
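/* Editor's sketch (compiled out): a typical kernel-internal call to
 * data_copy(); the endpoints and buffer names here are hypothetical.
 */
#if 0
	int r;
	r = data_copy(src_ep, (vir_bytes) src_buf,
		dst_ep, (vir_bytes) dst_buf, len);
	if(r != OK)
		printf("data_copy failed: %d\n", r);
#endif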
/*===========================================================================*
 *				data_copy_vmcheck			     *
 *===========================================================================*/
PUBLIC int data_copy_vmcheck(struct proc * caller,
	endpoint_t from_proc, vir_bytes from_addr,
	endpoint_t to_proc, vir_bytes to_addr,
	size_t bytes)
{
	struct vir_addr src, dst;
	src.segment = dst.segment = D;
	src.offset = from_addr;
	dst.offset = to_addr;
	src.proc_nr_e = from_proc;
	dst.proc_nr_e = to_proc;

	return virtual_copy_vmcheck(caller, &src, &dst, bytes);
}
/*===========================================================================*
 *				arch_pre_exec				     *
 *===========================================================================*/
PUBLIC void arch_pre_exec(struct proc *pr, u32_t ip, u32_t sp)
{
/* Wipe extra LDT entries, set program counter, and stack pointer. */
	memset(pr->p_seg.p_ldt + EXTRA_LDT_INDEX, 0,
		sizeof(pr->p_seg.p_ldt[0]) * (LDT_SIZE - EXTRA_LDT_INDEX));

	pr->p_reg.pc = ip;
	pr->p_reg.sp = sp;
}
/*===========================================================================*
 *				arch_umap				     *
 *===========================================================================*/
PUBLIC int arch_umap(struct proc *pr, vir_bytes offset, vir_bytes count,
	int seg, phys_bytes *addr)
{
	switch(seg) {
		case BIOS_SEG:
			*addr = umap_bios(offset, count);
			return OK;
	}

	/* This must be EINVAL; the umap fallback function in
	 * lib/syslib/alloc_util.c depends on it to detect an
	 * older kernel (as opposed to a mapping error).
	 */
	return EINVAL;
}
/* VM reports the page directory slot we're allowed to use freely. */
void i386_freepde(int pde)
{
	if(nfreepdes >= WANT_FREEPDES)
		return;
	freepdes[nfreepdes++] = pde;
}
PUBLIC int arch_phys_map(int index, phys_bytes *addr, phys_bytes *len,
	int *flags)
{
#ifdef CONFIG_APIC
	/* map the local APIC if enabled */
	if (index == 0 && lapic_addr) {
		*addr = vir2phys(lapic_addr);
		*len = 4 << 10 /* 4kB */;
		*flags = VMMF_UNCACHED;
		return OK;
	}
	return EINVAL;
#else
	/* we don't want anything */
	return EINVAL;
#endif
}
PUBLIC int arch_phys_map_reply(int index, vir_bytes addr)
{
#ifdef CONFIG_APIC
	/* if local APIC is enabled */
	if (index == 0 && lapic_addr) {
		lapic_addr_vaddr = addr;
		return OK;
	}
#endif
	return EINVAL;
}
PUBLIC int arch_enable_paging(void)
{
#ifdef CONFIG_APIC
	/* if local APIC is enabled */
	if (lapic_addr) {
		lapic_addr = lapic_addr_vaddr;
		lapic_eoi_addr = LAPIC_EOI;
	}
#endif
#ifdef CONFIG_WATCHDOG
	/*
	 * We make sure that we don't enable the watchdog until paging is
	 * turned on, as we might get an NMI while switching and we might
	 * still use the wrong lapic address. Bad things would happen. It
	 * is unfortunate but such is life.
	 */
	i386_watchdog_start();
#endif

	return OK;
}