/*
 * Copyright 2007, Haiku Inc. All rights reserved.
 * Distributed under the terms of the MIT License.
 *
 * Authors:
 * 		François Revol <revol@free.fr>
 *
 * Copyright 2003-2007, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */

#ifndef ARCH_M68K_MMU_TYPE
#error This file is included from arch_*_mmu.cpp
#endif
/* (mmu_man) Implementation details on 68030 and others:

	Unlike on x86 we can't just switch the context to another team by just
	setting a register to another page directory, since we only have one
	page table containing both kernel and user address mappings.
	The 030 supports arbitrary layout of the page directory tree, including
	a 1-bit first level (2 entries top level table) that would map kernel
	and user land at a single place. But 040 and later only support a fixed
	splitting of 7/7/6 for 4K pages.

	Since 68k SMP hardware is rare enough that we don't want to support it,
	we can take some shortcuts.

	As we don't want a separate user and kernel space, we'll use a single
	table. With the 7/7/6 split the 2nd level would require 32KB of tables,
	which is small enough that we don't want to use the list hack from x86.
	XXX: we use the hack for now, check later

	Since page directories/tables don't fit exactly a page, we stuff more
	than one per page, and allocate them all at once, and add them at the
	same time to the tree. So we guarantee all higher-level entries modulo
	the number of tables/page are either invalid or present.
 */
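/* Illustrative sketch (assumes the 7/7/6 + 12-bit-offset layout described
   above for 4K pages; the shift values are an illustration only, the real
   lookups go through the VADDR_TO_P*ENT macros from the MMU headers): */
#if 0
static inline void
example_split_va_776(uint32 va, uint32 *root, uint32 *dir, uint32 *page,
	uint32 *offset)
{
	*root = (va >> 25) & 0x7f;	// 7 bits: index into the root table
	*dir = (va >> 18) & 0x7f;	// 7 bits: index into a page directory
	*page = (va >> 12) & 0x3f;	// 6 bits: index into a page table
	*offset = va & 0xfff;		// 12 bits: offset inside the 4K page
}
#endif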
#include <KernelExport.h>

#include <vm/vm_page.h>
#include <vm/vm_priv.h>
#include <vm/VMAddressSpace.h>

#include <boot/kernel_args.h>
#include <arch/vm_translation_map.h>

#include "generic_vm_physical_page_mapper.h"
#include "generic_vm_physical_page_ops.h"
//#define TRACE_VM_TMAP
#ifdef TRACE_VM_TMAP
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif
//#define IOSPACE_SIZE (4*1024*1024)
#define IOSPACE_SIZE (16*1024*1024)
#define IOSPACE_CHUNK_SIZE (NUM_PAGEENT_PER_TBL*B_PAGE_SIZE)

static page_table_entry *iospace_pgtables = NULL;

#define PAGE_INVALIDATE_CACHE_SIZE 64
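// Up to PAGE_INVALIDATE_CACHE_SIZE virtual addresses are queued per map in
// arch_data->pages_to_invalidate; flush_tmap() later either invalidates them
// one by one or, if the counter overflowed the cache, flushes the whole
// (user or global) TLB instead.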
// vm_translation object stuff
typedef struct vm_translation_map_arch_info {
	page_root_entry *rtdir_virt;
	page_root_entry *rtdir_phys;
	int num_invalidate_pages;
	addr_t pages_to_invalidate[PAGE_INVALIDATE_CACHE_SIZE];
} vm_translation_map_arch_info;


static page_table_entry *page_hole = NULL;
static page_directory_entry *page_hole_pgdir = NULL;

static page_root_entry *sKernelPhysicalPageRoot = NULL;
static page_root_entry *sKernelVirtualPageRoot = NULL;
static addr_t sQueryPage = NULL;
//static page_table_entry *sQueryPageTable;
//static page_directory_entry *sQueryPageDir;
static page_table_entry sQueryDesc __attribute__ (( aligned (4) ));

static vm_translation_map *tmap_list;
static spinlock tmap_list_lock;

static addr_t sIOSpaceBase;

#define CHATTY_TMAP 0
// use P*E_TO_* and TA_TO_P*EA !
#define ADDR_SHIFT(x) ((x) >> 12)
#define ADDR_REVERSE_SHIFT(x) ((x) << 12)

#define FIRST_USER_PGROOT_ENT	(VADDR_TO_PRENT(USER_BASE))
#define FIRST_USER_PGDIR_ENT	(VADDR_TO_PDENT(USER_BASE))
#define NUM_USER_PGROOT_ENTS	(VADDR_TO_PRENT(ROUNDUP(USER_SIZE, B_PAGE_SIZE * 64 * 128)))
#define NUM_USER_PGDIR_ENTS		(VADDR_TO_PDENT(ROUNDUP(USER_SIZE, B_PAGE_SIZE * 64)))
#define FIRST_KERNEL_PGROOT_ENT	(VADDR_TO_PRENT(KERNEL_BASE))
#define FIRST_KERNEL_PGDIR_ENT	(VADDR_TO_PDENT(KERNEL_BASE))
#define NUM_KERNEL_PGROOT_ENTS	(VADDR_TO_PRENT(KERNEL_SIZE))
#define NUM_KERNEL_PGDIR_ENTS	(VADDR_TO_PDENT(KERNEL_SIZE))
#define IS_KERNEL_MAP(map)		(map->arch_data->rtdir_phys == sKernelPhysicalPageRoot)
static status_t early_query(addr_t va, addr_t *out_physical);
static status_t get_physical_page_tmap_internal(addr_t pa, addr_t *va, uint32 flags);
static status_t put_physical_page_tmap_internal(addr_t va);

static void flush_tmap(vm_translation_map *map);
#warning M68K: RENAME
page_root_entry *
_m68k_translation_map_get_pgdir(vm_translation_map *map)
{
	return map->arch_data->rtdir_phys;
}
static void
init_page_root_entry(page_root_entry *entry)
{
	*(page_root_entry_scalar *)entry = DFL_ROOTENT_VAL;
}
static void
update_page_root_entry(page_root_entry *entry, page_root_entry *with)
{
	// update page root entry atomically
	*(page_root_entry_scalar *)entry = *(page_root_entry_scalar *)with;
}
static void
init_page_directory_entry(page_directory_entry *entry)
{
	*(page_directory_entry_scalar *)entry = DFL_DIRENT_VAL;
}
static void
update_page_directory_entry(page_directory_entry *entry, page_directory_entry *with)
{
	// update page directory entry atomically
	*(page_directory_entry_scalar *)entry = *(page_directory_entry_scalar *)with;
}
static void
init_page_table_entry(page_table_entry *entry)
{
	*(page_table_entry_scalar *)entry = DFL_PAGEENT_VAL;
}
static void
update_page_table_entry(page_table_entry *entry, page_table_entry *with)
{
	// update page table entry atomically
	// XXX: is it ?? (long desc?)
	*(page_table_entry_scalar *)entry = *(page_table_entry_scalar *)with;
}
static void
init_page_indirect_entry(page_indirect_entry *entry)
{
#warning M68K: is it correct ?
	*(page_indirect_entry_scalar *)entry = DFL_PAGEENT_VAL;
}


static void
update_page_indirect_entry(page_indirect_entry *entry, page_indirect_entry *with)
{
	// update page indirect entry atomically
	// XXX: is it ?? (long desc?)
	*(page_indirect_entry_scalar *)entry = *(page_indirect_entry_scalar *)with;
}


#warning M68K: allocate all kernel pgdirs at boot and remove this (also dont remove them anymore from unmap)
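/*!	Propagates a root-level entry to every translation map in the global
	tmap list, so that a kernel-space mapping added to one map becomes
	visible in all of them. Interrupts are disabled and tmap_list_lock is
	held while walking the list.
*/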
static void
_update_all_pgdirs(int index, page_root_entry e)
{
	vm_translation_map *entry;
	unsigned int state = disable_interrupts();

	acquire_spinlock(&tmap_list_lock);

	for (entry = tmap_list; entry != NULL; entry = entry->next)
		entry->arch_data->rtdir_virt[index] = e;

	release_spinlock(&tmap_list_lock);
	restore_interrupts(state);
}
// this is used before the vm is fully up, it uses the
// transparent translation of the first 256MB
// as set up by the bootloader.
static status_t
early_query(addr_t va, addr_t *_physicalAddress)
{
	page_root_entry *pr = sKernelVirtualPageRoot;
	page_directory_entry *pd;
	page_indirect_entry *pi;
	page_table_entry *pt;
	addr_t pa;
	int index;
	status_t err = B_ERROR;	// no pagetable here
	TRACE(("%s(%p,)\n", __FUNCTION__, va));

	index = VADDR_TO_PRENT(va);
	TRACE(("%s: pr[%d].type %d\n", __FUNCTION__, index, pr[index].type));
	if (pr && pr[index].type == DT_ROOT) {
		pa = PRE_TO_TA(pr[index]);
		// pa == va when in TT
		// and no need to fiddle with cache
		pd = (page_directory_entry *)pa;

		index = VADDR_TO_PDENT(va);
		TRACE(("%s: pd[%d].type %d\n", __FUNCTION__, index,
			pd ? (pd[index].type) : -1));
		if (pd && pd[index].type == DT_DIR) {
			pa = PDE_TO_TA(pd[index]);
			pt = (page_table_entry *)pa;

			index = VADDR_TO_PTENT(va);
			TRACE(("%s: pt[%d].type %d\n", __FUNCTION__, index,
				pt ? (pt[index].type) : -1));
			if (pt && pt[index].type == DT_INDIRECT) {
				pi = (page_indirect_entry *)pt;
				pa = PIE_TO_TA(pi[index]);
				pt = (page_table_entry *)pa;
				index = 0; // single descriptor
			}

			if (pt && pt[index].type == DT_PAGE) {
				*_physicalAddress = PTE_TO_PA(pt[index]);
				// we should only be passed page va, but just in case.
				*_physicalAddress += va % B_PAGE_SIZE;
				err = B_OK;
			}
		}
	}

	return err;
}
/*!	Acquires the map's recursive lock, and resets the invalidate pages counter
	in case it's the first locking recursion.
*/
static status_t
lock_tmap(vm_translation_map *map)
{
	TRACE(("lock_tmap: map %p\n", map));

	recursive_lock_lock(&map->lock);
	if (recursive_lock_get_recursion(&map->lock) == 1) {
		// we were the first one to grab the lock
		TRACE(("clearing invalidated page count\n"));
		map->arch_data->num_invalidate_pages = 0;
	}

	return 0;
}
/*!	Unlocks the map, and, if we're actually losing the recursive lock,
	flushes all pending changes of this map (ie. flushes TLB caches as
	needed).
*/
static status_t
unlock_tmap(vm_translation_map *map)
{
	TRACE(("unlock_tmap: map %p\n", map));

	if (recursive_lock_get_recursion(&map->lock) == 1) {
		// we're about to release it for the last time
		flush_tmap(map);
	}

	recursive_lock_unlock(&map->lock);
	return 0;
}
static void
destroy_tmap(vm_translation_map *map)
{
	int state;
	vm_translation_map *entry;
	vm_translation_map *last = NULL;
	unsigned int i, j;

	// remove it from the tmap list
	state = disable_interrupts();
	acquire_spinlock(&tmap_list_lock);

	entry = tmap_list;
	while (entry != NULL) {
		if (entry == map) {
			if (last != NULL)
				last->next = entry->next;
			else
				tmap_list = entry->next;
			break;
		}

		last = entry;
		entry = entry->next;
	}

	release_spinlock(&tmap_list_lock);
	restore_interrupts(state);

	if (map->arch_data->rtdir_virt != NULL) {
		// cycle through and free all of the user space pgtables
		// since the size of tables don't match B_PAGE_SIZE,
		// we alloc several at once, based on modulos,
		// we make sure they are either all in the tree or none.
		for (i = VADDR_TO_PRENT(USER_BASE); i <= VADDR_TO_PRENT(USER_BASE + (USER_SIZE - 1)); i++) {
			addr_t pgdir_pn;
			page_directory_entry *pgdir;
			vm_page *dirpage;

			if (map->arch_data->rtdir_virt[i].type == DT_INVALID)
				continue;
			if (map->arch_data->rtdir_virt[i].type != DT_ROOT) {
				panic("rtdir[%d]: buggy descriptor type", i);
				return;
			}
			// suboptimal (done 8 times)
			pgdir_pn = PRE_TO_PN(map->arch_data->rtdir_virt[i]);
			dirpage = vm_lookup_page(pgdir_pn);
			pgdir = &(((page_directory_entry *)dirpage)[i%NUM_DIRTBL_PER_PAGE]);

			for (j = 0; j <= NUM_DIRENT_PER_TBL; j+=NUM_PAGETBL_PER_PAGE) {
				addr_t pgtbl_pn;
				page_table_entry *pgtbl;
				vm_page *page;

				if (pgdir[j].type == DT_INVALID)
					continue;
				if (pgdir[j].type != DT_DIR) {
					panic("rtdir[%d][%d]: buggy descriptor type", i, j);
					return;
				}
				pgtbl_pn = PDE_TO_PN(pgdir[j]);
				page = vm_lookup_page(pgtbl_pn);
				pgtbl = (page_table_entry *)page;

				if (page == NULL) {
					panic("destroy_tmap: didn't find pgtable page\n");
					return;
				}
				DEBUG_PAGE_ACCESS_START(page);
				vm_page_set_state(page, PAGE_STATE_FREE);
			}
			if (((i + 1) % NUM_DIRTBL_PER_PAGE) == 0) {
				DEBUG_PAGE_ACCESS_END(dirpage);
				vm_page_set_state(dirpage, PAGE_STATE_FREE);
			}
		}
		free(map->arch_data->rtdir_virt);
	}

	free(map->arch_data);
	recursive_lock_destroy(&map->lock);
}
static void
put_pgdir_in_pgroot(page_root_entry *entry,
	addr_t pgdir_phys, uint32 attributes)
{
	page_root_entry dir;
	// put it in the pgdir
	init_page_root_entry(&dir);
	dir.addr = TA_TO_PREA(pgdir_phys);

	// ToDo: we ignore the attributes of the page table - for compatibility
	// with BeOS we allow having user accessible areas in the kernel address
	// space. This is currently being used by some drivers, mainly for the
	// frame buffer. Our current real time data implementation makes use of
	// it, too.
	// We might want to get rid of this possibility one day, especially if
	// we intend to port it to a platform that does not support this.
	dir.type = DT_ROOT;
	update_page_root_entry(entry, &dir);
}
static void
put_pgtable_in_pgdir(page_directory_entry *entry,
	addr_t pgtable_phys, uint32 attributes)
{
	page_directory_entry table;
	// put it in the pgdir
	init_page_directory_entry(&table);
	table.addr = TA_TO_PDEA(pgtable_phys);

	// ToDo: we ignore the attributes of the page table - for compatibility
	// with BeOS we allow having user accessible areas in the kernel address
	// space. This is currently being used by some drivers, mainly for the
	// frame buffer. Our current real time data implementation makes use of
	// it, too.
	// We might want to get rid of this possibility one day, especially if
	// we intend to port it to a platform that does not support this.
	table.type = DT_DIR;
	update_page_directory_entry(entry, &table);
}
static void
put_page_table_entry_in_pgtable(page_table_entry *entry,
	addr_t physicalAddress, uint32 attributes, bool globalPage)
{
	page_table_entry page;
	init_page_table_entry(&page);

	page.addr = TA_TO_PTEA(physicalAddress);

	// if the page is user accessible, it's automatically
	// accessible in kernel space, too (but with the same
	// protection)
	page.supervisor = (attributes & B_USER_PROTECTION) == 0;
	if (page.supervisor)
		page.write_protect = (attributes & B_KERNEL_WRITE_AREA) == 0;
	else
		page.write_protect = (attributes & B_WRITE_AREA) == 0;
	page.type = DT_PAGE;

#ifdef PAGE_HAS_GLOBAL_BIT
	if (globalPage)
		page.global = 1;
#endif

	// put it in the page table
	update_page_table_entry(entry, &page);
}
static void
put_page_indirect_entry_in_pgtable(page_indirect_entry *entry,
	addr_t physicalAddress, uint32 attributes, bool globalPage)
{
	page_indirect_entry page;
	init_page_indirect_entry(&page);

	page.addr = TA_TO_PIEA(physicalAddress);
	page.type = DT_INDIRECT;

	// there are no protection bits in indirect descriptor usually.

	// put it in the page table
	update_page_indirect_entry(entry, &page);
}
static size_t
map_max_pages_need(vm_translation_map */*map*/, addr_t start, addr_t end)
{
	size_t need;
	size_t pgdirs;

	// If start == 0, the actual base address is not yet known to the caller
	// and we shall assume the worst case.
	if (start == 0) {
#warning M68K: FIXME?
		start = (1023) * B_PAGE_SIZE;
	}

	pgdirs = VADDR_TO_PRENT(end) + 1 - VADDR_TO_PRENT(start);
	// how much for page directories
	need = (pgdirs + NUM_DIRTBL_PER_PAGE - 1) / NUM_DIRTBL_PER_PAGE;
	// and page tables themselves
	need = ((pgdirs * NUM_DIRENT_PER_TBL) + NUM_PAGETBL_PER_PAGE - 1) / NUM_PAGETBL_PER_PAGE;

	// better rounding when only 1 pgdir
	// XXX: do better for other cases
	need += (VADDR_TO_PDENT(end) + 1 - VADDR_TO_PDENT(start) + NUM_PAGETBL_PER_PAGE - 1) / NUM_PAGETBL_PER_PAGE;

	return need;
}
static status_t
map_tmap(vm_translation_map *map, addr_t va, addr_t pa, uint32 attributes)
{
	page_root_entry *pr;
	page_directory_entry *pd;
	page_table_entry *pt;
	addr_t pd_pg, pt_pg;
	unsigned int rindex, dindex, pindex;
	status_t err;

	TRACE(("map_tmap: entry pa 0x%lx va 0x%lx\n", pa, va));

#if 0
	dprintf("pgdir at 0x%x\n", pgdir);
	dprintf("index is %d\n", va / B_PAGE_SIZE / 1024);
	dprintf("final at 0x%x\n", &pgdir[va / B_PAGE_SIZE / 1024]);
	dprintf("value is 0x%x\n", *(int *)&pgdir[va / B_PAGE_SIZE / 1024]);
	dprintf("present bit is %d\n", pgdir[va / B_PAGE_SIZE / 1024].present);
	dprintf("addr is %d\n", pgdir[va / B_PAGE_SIZE / 1024].addr);
#endif
	pr = map->arch_data->rtdir_virt;

	// check to see if a page directory exists for this range
	rindex = VADDR_TO_PRENT(va);
	if (pr[rindex].type != DT_ROOT) {
		addr_t pgdir;
		vm_page *page;
		unsigned int i;

		// we need to allocate a pgtable
		page = vm_page_allocate_page(PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);

		DEBUG_PAGE_ACCESS_END(page);

		pgdir = page->physical_page_number * B_PAGE_SIZE;

		TRACE(("map_tmap: asked for free page for pgdir. 0x%lx\n", pgdir));

		// for each pgdir on the allocated page:
		for (i = 0; i < NUM_DIRTBL_PER_PAGE; i++) {
			unsigned aindex = rindex & ~(NUM_DIRTBL_PER_PAGE-1); /* aligned */
			page_root_entry *apr = &pr[aindex + i];

			put_pgdir_in_pgroot(apr, pgdir, attributes
				| (attributes & B_USER_PROTECTION ? B_WRITE_AREA : B_KERNEL_WRITE_AREA));

			// update any other page directories, if it maps kernel space
			//XXX: suboptimal, should batch them
			if ((aindex+i) >= FIRST_KERNEL_PGDIR_ENT
				&& (aindex+i) < (FIRST_KERNEL_PGDIR_ENT + NUM_KERNEL_PGDIR_ENTS))
				_update_all_pgdirs((aindex+i), pr[aindex+i]);

			pgdir += SIZ_DIRTBL;
		}
#warning M68K: really mean map_count++ ??
		map->map_count++;
	}

	// now, fill in the pentry
	do {
		err = get_physical_page_tmap_internal(PRE_TO_PA(pr[rindex]),
			&pd_pg, PHYSICAL_PAGE_DONT_WAIT);
	} while (err < B_OK);
	pd = (page_directory_entry *)pd_pg;
	// we want the table at rindex, not at rindex%(tbl/page)
	pd += (rindex % NUM_DIRTBL_PER_PAGE) * NUM_DIRENT_PER_TBL;

	// check to see if a page table exists for this range
	dindex = VADDR_TO_PDENT(va);
	if (pd[dindex].type != DT_DIR) {
		addr_t pgtable;
		vm_page *page;
		unsigned int i;

		// we need to allocate a pgtable
		page = vm_page_allocate_page(PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);

		DEBUG_PAGE_ACCESS_END(page);

		pgtable = page->physical_page_number * B_PAGE_SIZE;

		TRACE(("map_tmap: asked for free page for pgtable. 0x%lx\n", pgtable));

		// for each pgtable on the allocated page:
		for (i = 0; i < NUM_PAGETBL_PER_PAGE; i++) {
			unsigned aindex = dindex & ~(NUM_PAGETBL_PER_PAGE-1); /* aligned */
			page_directory_entry *apd = &pd[aindex + i];

			put_pgtable_in_pgdir(apd, pgtable, attributes
				| (attributes & B_USER_PROTECTION ? B_WRITE_AREA : B_KERNEL_WRITE_AREA));

			// no need to update other page directories for kernel space;
			// the root-level already points to us.

			pgtable += SIZ_PAGETBL;
		}
#warning M68K: really mean map_count++ ??
		map->map_count++;
	}

	// now, fill in the pentry
	do {
		err = get_physical_page_tmap_internal(PDE_TO_PA(pd[dindex]),
			&pt_pg, PHYSICAL_PAGE_DONT_WAIT);
	} while (err < B_OK);
	pt = (page_table_entry *)pt_pg;
	// we want the table at rindex, not at rindex%(tbl/page)
	pt += (dindex % NUM_PAGETBL_PER_PAGE) * NUM_PAGEENT_PER_TBL;

	pindex = VADDR_TO_PTENT(va);

	put_page_table_entry_in_pgtable(&pt[pindex], pa, attributes,
		IS_KERNEL_MAP(map));

	put_physical_page_tmap_internal(pt_pg);
	put_physical_page_tmap_internal(pd_pg);

	if (map->arch_data->num_invalidate_pages < PAGE_INVALIDATE_CACHE_SIZE)
		map->arch_data->pages_to_invalidate[map->arch_data->num_invalidate_pages] = va;

	map->arch_data->num_invalidate_pages++;

	return 0;
}
static status_t
unmap_tmap(vm_translation_map *map, addr_t start, addr_t end)
{
	page_table_entry *pt;
	page_directory_entry *pd;
	page_root_entry *pr = map->arch_data->rtdir_virt;
	addr_t pd_pg, pt_pg;
	status_t status;
	int index;

	start = ROUNDDOWN(start, B_PAGE_SIZE);
	end = ROUNDUP(end, B_PAGE_SIZE);

	TRACE(("unmap_tmap: asked to free pages 0x%lx to 0x%lx\n", start, end));

restart:
	if (start >= end)
		return B_OK;

	index = VADDR_TO_PRENT(start);
	if (pr[index].type != DT_ROOT) {
		// no pagedir here, move the start up to access the next page table
		start = ROUNDUP(start + 1, B_PAGE_SIZE);
		goto restart;
	}

	do {
		status = get_physical_page_tmap_internal(PRE_TO_PA(pr[index]),
			&pd_pg, PHYSICAL_PAGE_DONT_WAIT);
	} while (status < B_OK);
	pd = (page_directory_entry *)pd_pg;
	// we want the table at rindex, not at rindex%(tbl/page)
	pd += (index % NUM_DIRTBL_PER_PAGE) * NUM_DIRENT_PER_TBL;

	index = VADDR_TO_PDENT(start);
	if (pd[index].type != DT_DIR) {
		// no pagetable here, move the start up to access the next page table
		start = ROUNDUP(start + 1, B_PAGE_SIZE);
		put_physical_page_tmap_internal(pd_pg);
		goto restart;
	}

	do {
		status = get_physical_page_tmap_internal(PDE_TO_PA(pd[index]),
			&pt_pg, PHYSICAL_PAGE_DONT_WAIT);
	} while (status < B_OK);
	pt = (page_table_entry *)pt_pg;
	// we want the table at rindex, not at rindex%(tbl/page)
	pt += (index % NUM_PAGETBL_PER_PAGE) * NUM_PAGEENT_PER_TBL;

	for (index = VADDR_TO_PTENT(start);
			(index < NUM_PAGEENT_PER_TBL) && (start < end);
			index++, start += B_PAGE_SIZE) {
		if (pt[index].type != DT_PAGE && pt[index].type != DT_INDIRECT) {
			// page mapping not valid
			continue;
		}

		TRACE(("unmap_tmap: removing page 0x%lx\n", start));

		pt[index].type = DT_INVALID;

		if (map->arch_data->num_invalidate_pages < PAGE_INVALIDATE_CACHE_SIZE)
			map->arch_data->pages_to_invalidate[map->arch_data->num_invalidate_pages] = start;

		map->arch_data->num_invalidate_pages++;
	}

	put_physical_page_tmap_internal(pt_pg);
	put_physical_page_tmap_internal(pd_pg);

	goto restart;
}
// XXX: 040 should be able to do that with PTEST (but not 030 or 060)
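/*!	Looks up the physical address and flags for a virtual address without
	blocking: instead of going through the physical page mapper (which may
	sleep), each table level is temporarily mapped at sQueryPage by writing
	its address into sQueryDesc, the indirect descriptor that
	init_post_area wired into the page table slot backing sQueryPage.
*/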
static status_t
query_tmap_interrupt(vm_translation_map *map, addr_t va, addr_t *_physical,
	uint32 *_flags)
{
	page_root_entry *pr = map->arch_data->rtdir_virt;
	page_directory_entry *pd;
	page_indirect_entry *pi;
	page_table_entry *pt;
	addr_t physicalPageTable;
	int index;
	status_t err = B_ERROR;	// no pagetable here

	if (sQueryPage == NULL)
		return err; // not yet initialized !?

	index = VADDR_TO_PRENT(va);
	if (pr && pr[index].type == DT_ROOT) {
		put_page_table_entry_in_pgtable(&sQueryDesc, PRE_TO_TA(pr[index]), B_KERNEL_READ_AREA, false);
		arch_cpu_invalidate_TLB_range((addr_t)pt, (addr_t)pt);
		pd = (page_directory_entry *)sQueryPage;

		index = VADDR_TO_PDENT(va);
		if (pd && pd[index].type == DT_DIR) {
			put_page_table_entry_in_pgtable(&sQueryDesc, PDE_TO_TA(pd[index]), B_KERNEL_READ_AREA, false);
			arch_cpu_invalidate_TLB_range((addr_t)pt, (addr_t)pt);
			pt = (page_table_entry *)sQueryPage;

			index = VADDR_TO_PTENT(va);
			if (pt && pt[index].type == DT_INDIRECT) {
				pi = (page_indirect_entry *)pt;
				put_page_table_entry_in_pgtable(&sQueryDesc, PIE_TO_TA(pi[index]), B_KERNEL_READ_AREA, false);
				arch_cpu_invalidate_TLB_range((addr_t)pt, (addr_t)pt);
				pt = (page_table_entry *)sQueryPage;
				index = 0; // single descriptor
			}

			if (pt /*&& pt[index].type == DT_PAGE*/) {
				*_physical = PTE_TO_PA(pt[index]);
				// we should only be passed page va, but just in case.
				*_physical += va % B_PAGE_SIZE;
				*_flags |= ((pt[index].write_protect ? 0 : B_KERNEL_WRITE_AREA) | B_KERNEL_READ_AREA)
					| (pt[index].dirty ? PAGE_MODIFIED : 0)
					| (pt[index].accessed ? PAGE_ACCESSED : 0)
					| ((pt[index].type == DT_PAGE) ? PAGE_PRESENT : 0);
				err = B_OK;
			}
		}
	}

	// unmap the pg table from the indirect desc.
	sQueryDesc.type = DT_INVALID;

	return err;
}
static status_t
query_tmap(vm_translation_map *map, addr_t va, addr_t *_physical, uint32 *_flags)
{
	page_table_entry *pt;
	page_indirect_entry *pi;
	page_directory_entry *pd;
	page_directory_entry *pr = map->arch_data->rtdir_virt;
	addr_t pd_pg, pt_pg, pi_pg;
	status_t status;
	int index;

	// default the flags to not present
	*_flags = 0;
	*_physical = 0;

	index = VADDR_TO_PRENT(va);
	if (pr[index].type != DT_ROOT) {
		// no pagetable here
		return B_NO_ERROR;
	}

	do {
		status = get_physical_page_tmap_internal(PRE_TO_PA(pr[index]),
			&pd_pg, PHYSICAL_PAGE_DONT_WAIT);
	} while (status < B_OK);
	pd = (page_directory_entry *)pd_pg;
	// we want the table at rindex, not at rindex%(tbl/page)
	pd += (index % NUM_DIRTBL_PER_PAGE) * NUM_DIRENT_PER_TBL;

	index = VADDR_TO_PDENT(va);
	if (pd[index].type != DT_DIR) {
		// no pagetable here
		put_physical_page_tmap_internal(pd_pg);
		return B_NO_ERROR;
	}

	do {
		status = get_physical_page_tmap_internal(PDE_TO_PA(pd[index]),
			&pt_pg, PHYSICAL_PAGE_DONT_WAIT);
	} while (status < B_OK);
	pt = (page_table_entry *)pt_pg;
	// we want the table at rindex, not at rindex%(tbl/page)
	pt += (index % NUM_PAGETBL_PER_PAGE) * NUM_PAGEENT_PER_TBL;

	index = VADDR_TO_PTENT(va);

	// handle indirect descriptor
	if (pt[index].type == DT_INDIRECT) {
		pi = (page_indirect_entry *)pt;
		pi_pg = pt_pg;
		do {
			status = get_physical_page_tmap_internal(PIE_TO_PA(pi[index]),
				&pt_pg, PHYSICAL_PAGE_DONT_WAIT);
		} while (status < B_OK);
		pt = (page_table_entry *)pt_pg;
		// add offset from start of page
		pt += PIE_TO_PO(pi[index]) / sizeof(page_table_entry);
		// release the indirect table page
		put_physical_page_tmap_internal(pi_pg);
	}

	*_physical = PTE_TO_PA(pt[index]);

	// read in the page state flags
	if (!pt[index].supervisor)
		*_flags |= (pt[index].write_protect ? 0 : B_WRITE_AREA) | B_READ_AREA;

	*_flags |= (pt[index].write_protect ? 0 : B_KERNEL_WRITE_AREA)
		| B_KERNEL_READ_AREA
		| (pt[index].dirty ? PAGE_MODIFIED : 0)
		| (pt[index].accessed ? PAGE_ACCESSED : 0)
		| ((pt[index].type == DT_PAGE) ? PAGE_PRESENT : 0);

	put_physical_page_tmap_internal(pt_pg);
	put_physical_page_tmap_internal(pd_pg);

	TRACE(("query_tmap: returning pa 0x%lx for va 0x%lx\n", *_physical, va));

	return B_OK;
}
static addr_t
get_mapped_size_tmap(vm_translation_map *map)
{
	return map->map_count;
}
static status_t
protect_tmap(vm_translation_map *map, addr_t start, addr_t end, uint32 attributes)
{
	page_table_entry *pt;
	page_directory_entry *pd;
	page_root_entry *pr = map->arch_data->rtdir_virt;
	addr_t pd_pg, pt_pg;
	status_t status;
	int index;

	start = ROUNDDOWN(start, B_PAGE_SIZE);
	end = ROUNDUP(end, B_PAGE_SIZE);

	TRACE(("protect_tmap: pages 0x%lx to 0x%lx, attributes %lx\n", start, end, attributes));

restart:
	if (start >= end)
		return B_OK;

	index = VADDR_TO_PRENT(start);
	if (pr[index].type != DT_ROOT) {
		// no pagedir here, move the start up to access the next page table
		start = ROUNDUP(start + 1, B_PAGE_SIZE);
		goto restart;
	}

	do {
		status = get_physical_page_tmap_internal(PRE_TO_PA(pr[index]),
			&pd_pg, PHYSICAL_PAGE_DONT_WAIT);
	} while (status < B_OK);
	pd = (page_directory_entry *)pd_pg;
	// we want the table at rindex, not at rindex%(tbl/page)
	pd += (index % NUM_DIRTBL_PER_PAGE) * NUM_DIRENT_PER_TBL;

	index = VADDR_TO_PDENT(start);
	if (pd[index].type != DT_DIR) {
		// no pagetable here, move the start up to access the next page table
		start = ROUNDUP(start + 1, B_PAGE_SIZE);
		put_physical_page_tmap_internal(pd_pg);
		goto restart;
	}

	do {
		status = get_physical_page_tmap_internal(PDE_TO_PA(pd[index]),
			&pt_pg, PHYSICAL_PAGE_DONT_WAIT);
	} while (status < B_OK);
	pt = (page_table_entry *)pt_pg;
	// we want the table at rindex, not at rindex%(tbl/page)
	pt += (index % NUM_PAGETBL_PER_PAGE) * NUM_PAGEENT_PER_TBL;

	for (index = VADDR_TO_PTENT(start);
			(index < NUM_PAGEENT_PER_TBL) && (start < end);
			index++, start += B_PAGE_SIZE) {
		// XXX: handle indirect ?
		if (pt[index].type != DT_PAGE /*&& pt[index].type != DT_INDIRECT*/) {
			// page mapping not valid
			continue;
		}

		TRACE(("protect_tmap: protect page 0x%lx\n", start));

		pt[index].supervisor = (attributes & B_USER_PROTECTION) == 0;
		if ((attributes & B_USER_PROTECTION) != 0)
			pt[index].write_protect = (attributes & B_WRITE_AREA) == 0;
		else
			pt[index].write_protect = (attributes & B_KERNEL_WRITE_AREA) == 0;

		if (map->arch_data->num_invalidate_pages < PAGE_INVALIDATE_CACHE_SIZE)
			map->arch_data->pages_to_invalidate[map->arch_data->num_invalidate_pages] = start;

		map->arch_data->num_invalidate_pages++;
	}

	put_physical_page_tmap_internal(pt_pg);
	put_physical_page_tmap_internal(pd_pg);

	goto restart;
}
static status_t
clear_flags_tmap(vm_translation_map *map, addr_t va, uint32 flags)
{
	page_table_entry *pt;
	page_indirect_entry *pi;
	page_directory_entry *pd;
	page_root_entry *pr = map->arch_data->rtdir_virt;
	addr_t pd_pg, pt_pg, pi_pg;
	status_t status;
	int index;
	int tlb_flush = false;

	index = VADDR_TO_PRENT(va);
	if (pr[index].type != DT_ROOT) {
		// no pagetable here
		return B_NO_ERROR;
	}

	do {
		status = get_physical_page_tmap_internal(PRE_TO_PA(pr[index]),
			&pd_pg, PHYSICAL_PAGE_DONT_WAIT);
	} while (status < B_OK);
	pd = (page_directory_entry *)pd_pg;
	// we want the table at rindex, not at rindex%(tbl/page)
	pd += (index % NUM_DIRTBL_PER_PAGE) * NUM_DIRENT_PER_TBL;

	index = VADDR_TO_PDENT(va);
	if (pd[index].type != DT_DIR) {
		// no pagetable here
		put_physical_page_tmap_internal(pd_pg);
		return B_NO_ERROR;
	}

	do {
		status = get_physical_page_tmap_internal(PDE_TO_PA(pd[index]),
			&pt_pg, PHYSICAL_PAGE_DONT_WAIT);
	} while (status < B_OK);
	pt = (page_table_entry *)pt_pg;
	// we want the table at rindex, not at rindex%(tbl/page)
	pt += (index % NUM_PAGETBL_PER_PAGE) * NUM_PAGEENT_PER_TBL;

	index = VADDR_TO_PTENT(va);

	// handle indirect descriptor
	if (pt[index].type == DT_INDIRECT) {
		pi = (page_indirect_entry *)pt;
		pi_pg = pt_pg;
		do {
			status = get_physical_page_tmap_internal(PIE_TO_PA(pi[index]),
				&pt_pg, PHYSICAL_PAGE_DONT_WAIT);
		} while (status < B_OK);
		pt = (page_table_entry *)pt_pg;
		// add offset from start of page
		pt += PIE_TO_PO(pi[index]) / sizeof(page_table_entry);
		// release the indirect table page
		put_physical_page_tmap_internal(pi_pg);
	}

	// clear out the flags we've been requested to clear
	if (flags & PAGE_MODIFIED) {
		pt[index].dirty = 0;
		tlb_flush = true;
	}
	if (flags & PAGE_ACCESSED) {
		pt[index].accessed = 0;
		tlb_flush = true;
	}

	put_physical_page_tmap_internal(pt_pg);
	put_physical_page_tmap_internal(pd_pg);

	if (tlb_flush) {
		if (map->arch_data->num_invalidate_pages < PAGE_INVALIDATE_CACHE_SIZE)
			map->arch_data->pages_to_invalidate[map->arch_data->num_invalidate_pages] = va;

		map->arch_data->num_invalidate_pages++;
	}

	return B_OK;
}
static void
flush_tmap(vm_translation_map *map)
{
	int state;

	if (map->arch_data->num_invalidate_pages <= 0)
		return;

	state = disable_interrupts();

	if (map->arch_data->num_invalidate_pages > PAGE_INVALIDATE_CACHE_SIZE) {
		// invalidate all pages
		TRACE(("flush_tmap: %d pages to invalidate, invalidate all\n",
			map->arch_data->num_invalidate_pages));

		if (IS_KERNEL_MAP(map)) {
			arch_cpu_global_TLB_invalidate();
		} else {
			arch_cpu_user_TLB_invalidate();
		}
	} else {
		TRACE(("flush_tmap: %d pages to invalidate, invalidate list\n",
			map->arch_data->num_invalidate_pages));

		arch_cpu_invalidate_TLB_list(map->arch_data->pages_to_invalidate,
			map->arch_data->num_invalidate_pages);
	}
	map->arch_data->num_invalidate_pages = 0;

	restore_interrupts(state);
}
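/*!	Fills the wired iospace page tables for the chunk starting at the given
	virtual address with consecutive mappings of the physical range starting
	at the given physical address, then invalidates the corresponding TLB
	entries. Both addresses are truncated to page boundaries, and the virtual
	address must lie inside the IO space window.
*/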
static status_t
map_iospace_chunk(addr_t va, addr_t pa, uint32 flags)
{
	int i;
	page_table_entry *pt;
	int state;

	pa &= ~(B_PAGE_SIZE - 1); // make sure it's page aligned
	va &= ~(B_PAGE_SIZE - 1); // make sure it's page aligned
	if (va < sIOSpaceBase || va >= (sIOSpaceBase + IOSPACE_SIZE))
		panic("map_iospace_chunk: passed invalid va 0x%lx\n", va);

	pt = &iospace_pgtables[(va - sIOSpaceBase) / B_PAGE_SIZE];
	for (i = 0; i < NUM_PAGEENT_PER_TBL; i++, pa += B_PAGE_SIZE) {
		init_page_table_entry(&pt[i]);
		pt[i].addr = TA_TO_PTEA(pa);
		pt[i].supervisor = 1;
		pt[i].write_protect = 0;
		pt[i].type = DT_PAGE;
		//XXX: not cachable ?
#ifdef MMU_HAS_GLOBAL_PAGES
		pt[i].global = 1;
#endif
	}

	state = disable_interrupts();
	arch_cpu_invalidate_TLB_range(va, va + (IOSPACE_CHUNK_SIZE - B_PAGE_SIZE));
	//smp_send_broadcast_ici(SMP_MSG_INVALIDATE_PAGE_RANGE,
	//	va, va + (IOSPACE_CHUNK_SIZE - B_PAGE_SIZE), 0,
	//	NULL, SMP_MSG_FLAG_SYNC);
	restore_interrupts(state);

	return B_OK;
}
static status_t
get_physical_page_tmap_internal(addr_t pa, addr_t *va, uint32 flags)
{
	return generic_get_physical_page(pa, va, flags);
}


static status_t
put_physical_page_tmap_internal(addr_t va)
{
	return generic_put_physical_page(va);
}


static status_t
get_physical_page_tmap(addr_t physicalAddress, addr_t *_virtualAddress,
	void **_handle)
{
	return generic_get_physical_page(physicalAddress, _virtualAddress, 0);
}


static status_t
put_physical_page_tmap(addr_t virtualAddress, void *handle)
{
	return generic_put_physical_page(virtualAddress);
}
static vm_translation_map_ops tmap_ops = {
	destroy_tmap,
	lock_tmap,
	unlock_tmap,
	map_max_pages_need,
	map_tmap,
	unmap_tmap,
	query_tmap,
	query_tmap_interrupt,
	get_mapped_size_tmap,
	protect_tmap,
	clear_flags_tmap,
	flush_tmap,
	get_physical_page_tmap,
	put_physical_page_tmap,
	get_physical_page_tmap,	// *_current_cpu()
	put_physical_page_tmap,	// *_current_cpu()
	get_physical_page_tmap,	// *_debug()
	put_physical_page_tmap,	// *_debug()
	// TODO: Replace the *_current_cpu() and *_debug() versions!

	generic_vm_memset_physical,
	generic_vm_memcpy_from_physical,
	generic_vm_memcpy_to_physical,
	generic_vm_memcpy_physical_page
	// TODO: Verify that this is safe to use!
};
status_t
m68k_vm_translation_map_init_map(vm_translation_map *map, bool kernel)
{
	TRACE(("vm_translation_map_create\n"));

	// initialize the new object
	map->ops = &tmap_ops;
	map->map_count = 0;

	recursive_lock_init(&map->lock, "translation map");

	map->arch_data = (vm_translation_map_arch_info *)malloc(sizeof(vm_translation_map_arch_info));
	if (map->arch_data == NULL) {
		recursive_lock_destroy(&map->lock);
		return B_NO_MEMORY;
	}

	map->arch_data->num_invalidate_pages = 0;

	if (!kernel) {
		// user
		// allocate a rtdir
		map->arch_data->rtdir_virt = (page_root_entry *)memalign(
			SIZ_ROOTTBL, SIZ_ROOTTBL);
		if (map->arch_data->rtdir_virt == NULL) {
			free(map->arch_data);
			recursive_lock_destroy(&map->lock);
			return B_NO_MEMORY;
		}
		vm_get_page_mapping(VMAddressSpace::KernelID(),
			(addr_t)map->arch_data->rtdir_virt, (addr_t *)&map->arch_data->rtdir_phys);
	} else {
		// kernel
		// we already know the kernel pgdir mapping
		map->arch_data->rtdir_virt = sKernelVirtualPageRoot;
		map->arch_data->rtdir_phys = sKernelPhysicalPageRoot;
	}

	// zero out the bottom portion of the new rtdir
	memset(map->arch_data->rtdir_virt + FIRST_USER_PGROOT_ENT, 0,
		NUM_USER_PGROOT_ENTS * sizeof(page_root_entry));

	// insert this new map into the map list
	{
		int state = disable_interrupts();
		acquire_spinlock(&tmap_list_lock);

		// copy the top portion of the rtdir from the current one
		memcpy(map->arch_data->rtdir_virt + FIRST_KERNEL_PGROOT_ENT,
			sKernelVirtualPageRoot + FIRST_KERNEL_PGROOT_ENT,
			NUM_KERNEL_PGROOT_ENTS * sizeof(page_root_entry));

		map->next = tmap_list;
		tmap_list = map;

		release_spinlock(&tmap_list_lock);
		restore_interrupts(state);
	}

	return B_OK;
}
status_t
m68k_vm_translation_map_init_kernel_map_post_sem(vm_translation_map *map)
{
	return B_OK;
}
status_t
m68k_vm_translation_map_init(kernel_args *args)
{
	status_t error;

	TRACE(("vm_translation_map_init: entry\n"));

	// page hole set up in stage2
	page_hole = (page_table_entry *)args->arch_args.page_hole;
	// calculate where the pgdir would be
	page_hole_pgdir = (page_directory_entry *)(((unsigned int)args->arch_args.page_hole) + (B_PAGE_SIZE * 1024 - B_PAGE_SIZE));
	// clear out the bottom 2 GB, unmap everything
	memset(page_hole_pgdir + FIRST_USER_PGDIR_ENT, 0, sizeof(page_directory_entry) * NUM_USER_PGDIR_ENTS);

	sKernelPhysicalPageRoot = (page_root_entry *)args->arch_args.phys_pgroot;
	sKernelVirtualPageRoot = (page_root_entry *)args->arch_args.vir_pgroot;

	sQueryDesc.type = DT_INVALID;

	B_INITIALIZE_SPINLOCK(&tmap_list_lock);

	// allocate some space to hold physical page mapping info
	//XXX: check page count
	// we already have all page directories allocated by the bootloader,
	// we only need page tables

	iospace_pgtables = (page_table_entry *)vm_allocate_early(args,
		B_PAGE_SIZE * (IOSPACE_SIZE / (B_PAGE_SIZE * NUM_PAGEENT_PER_TBL * NUM_PAGETBL_PER_PAGE)), ~0L,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0);

	TRACE(("iospace_pgtables %p\n", iospace_pgtables));

	// init physical page mapper
	error = generic_vm_physical_page_mapper_init(args, map_iospace_chunk,
		&sIOSpaceBase, IOSPACE_SIZE, IOSPACE_CHUNK_SIZE);
	if (error != B_OK)
		return error;
	TRACE(("iospace at %p\n", sIOSpaceBase));
	// initialize our data structures
	memset(iospace_pgtables, 0, B_PAGE_SIZE * (IOSPACE_SIZE / (B_PAGE_SIZE * NUM_PAGEENT_PER_TBL * NUM_PAGETBL_PER_PAGE)));

	TRACE(("mapping iospace_pgtables\n"));

	// put the array of pgtables directly into the kernel pagedir
	// these will be wired and kept mapped into virtual space to be
	// easy to get to.
	// note the bootloader allocates all page directories for us
	// as a contiguous block.
	// we also still have transparent translation enabled, va==pa.
	{
		addr_t phys_pgtable;
		addr_t virt_pgtable;
		page_root_entry *pr = sKernelVirtualPageRoot;
		page_directory_entry *pd;
		page_directory_entry *e;
		int index;
		int i;

		virt_pgtable = (addr_t)iospace_pgtables;

		for (i = 0; i < (IOSPACE_SIZE / (B_PAGE_SIZE * NUM_PAGEENT_PER_TBL));
				i++, virt_pgtable += SIZ_PAGETBL) {
			// early_query handles non-page-aligned addresses
			early_query(virt_pgtable, &phys_pgtable);
			index = VADDR_TO_PRENT(sIOSpaceBase) + i / NUM_DIRENT_PER_TBL;
			pd = (page_directory_entry *)PRE_TO_TA(pr[index]);
			e = &pd[(VADDR_TO_PDENT(sIOSpaceBase) + i) % NUM_DIRENT_PER_TBL];
			put_pgtable_in_pgdir(e, phys_pgtable,
				B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
		}
	}

	TRACE(("vm_translation_map_init: done\n"));

	return B_OK;
}
status_t
m68k_vm_translation_map_init_post_sem(kernel_args *args)
{
	return generic_vm_physical_page_mapper_init_post_sem(args);
}
status_t
m68k_vm_translation_map_init_post_area(kernel_args *args)
{
	// now that the vm is initialized, create a region that represents
	// its page directory
	void *temp;
	status_t error;
	area_id area;
	addr_t queryPage;

	TRACE(("vm_translation_map_init_post_area: entry\n"));

	// unmap the page hole hack we were using before
#warning M68K: FIXME
	//sKernelVirtualPageRoot[1023].present = 0;
	page_hole = NULL;
	page_hole_pgdir = NULL;

	temp = (void *)sKernelVirtualPageRoot;
	area = create_area("kernel_pgdir", &temp, B_EXACT_ADDRESS, B_PAGE_SIZE,
		B_ALREADY_WIRED, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
	if (area < B_OK)
		return area;

	temp = (void *)iospace_pgtables;
	area = create_area("iospace_pgtables", &temp, B_EXACT_ADDRESS,
		B_PAGE_SIZE * (IOSPACE_SIZE / (B_PAGE_SIZE * 1024)),
		B_ALREADY_WIRED, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
	if (area < B_OK)
		return area;

	error = generic_vm_physical_page_mapper_init_post_area(args);
	if (error != B_OK)
		return error;

	// this area is used for query_tmap_interrupt()
	// TODO: Note, this only works as long as all pages belong to the same
	//	page table, which is not yet enforced (or even tested)!
	// Note we don't support SMP which makes things simpler.

	area = vm_create_null_area(VMAddressSpace::KernelID(),
		"interrupt query pages", (void **)&queryPage, B_ANY_ADDRESS,
		B_PAGE_SIZE);
	if (area < B_OK)
		return area;

	// insert the indirect descriptor in the tree so we can map the page we want from it.
	{
		page_directory_entry *pageDirEntry;
		page_indirect_entry *pageTableEntry;
		addr_t physicalPageDir, physicalPageTable;
		addr_t physicalIndirectDesc;
		int index;

		// first get pa for the indirect descriptor

		index = VADDR_TO_PRENT((addr_t)&sQueryDesc);
		physicalPageDir = PRE_TO_PA(sKernelVirtualPageRoot[index]);

		get_physical_page_tmap_internal(physicalPageDir,
			(addr_t *)&pageDirEntry, PHYSICAL_PAGE_DONT_WAIT);

		index = VADDR_TO_PDENT((addr_t)&sQueryDesc);
		physicalPageTable = PDE_TO_PA(pageDirEntry[index]);

		get_physical_page_tmap_internal(physicalPageTable,
			(addr_t *)&pageTableEntry, PHYSICAL_PAGE_DONT_WAIT);

		index = VADDR_TO_PTENT((addr_t)&sQueryDesc);

		physicalIndirectDesc = PTE_TO_PA(pageTableEntry[index]);

		physicalIndirectDesc += ((addr_t)&sQueryDesc) % B_PAGE_SIZE;

		put_physical_page_tmap_internal((addr_t)pageTableEntry);
		put_physical_page_tmap_internal((addr_t)pageDirEntry);

		// then the va for the page table for the query page.

		//sQueryPageTable = (page_indirect_entry *)(queryPage);

		index = VADDR_TO_PRENT(queryPage);
		physicalPageDir = PRE_TO_PA(sKernelVirtualPageRoot[index]);

		get_physical_page_tmap_internal(physicalPageDir,
			(addr_t *)&pageDirEntry, PHYSICAL_PAGE_DONT_WAIT);

		index = VADDR_TO_PDENT(queryPage);
		physicalPageTable = PDE_TO_PA(pageDirEntry[index]);

		get_physical_page_tmap_internal(physicalPageTable,
			(addr_t *)&pageTableEntry, PHYSICAL_PAGE_DONT_WAIT);

		index = VADDR_TO_PTENT(queryPage);

		put_page_indirect_entry_in_pgtable(&pageTableEntry[index], physicalIndirectDesc,
			B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, false);

		put_physical_page_tmap_internal((addr_t)pageTableEntry);
		put_physical_page_tmap_internal((addr_t)pageDirEntry);
		//invalidate_TLB(sQueryPageTable);
	}

	// query_tmap_interrupt checks for NULL, now it can use it
	sQueryPage = queryPage;

	TRACE(("vm_translation_map_init_post_area: done\n"));

	return B_OK;
}
// almost directly taken from boot mmu code
// XXX horrible back door to map a page quickly regardless of translation map object, etc.
// used only during VM setup.
// uses a 'page hole' set up in the stage 2 bootloader. The page hole is created by pointing one of
// the pgdir entries back at itself, effectively mapping the contents of all of the 4MB of pagetables
// into a 4 MB region. It's only used here, and is later unmapped.
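// Illustrative sketch only (assumes the x86-style self-mapping layout the
// comment above describes; the code below relies on transparent translation
// instead): with a directory entry pointing back at the directory itself,
// the page table entry covering a virtual address could be read straight out
// of the 4 MB window, e.g.:
#if 0
static page_table_entry *
page_hole_lookup(addr_t va)
{
	// one page_table_entry per mapped page, laid out linearly in the hole
	return &page_hole[va / B_PAGE_SIZE];
}
#endif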
status_t
m68k_vm_translation_map_early_map(kernel_args *args, addr_t va, addr_t pa,
	uint8 attributes, addr_t (*get_free_page)(kernel_args *))
{
	page_root_entry *pr = (page_root_entry *)sKernelPhysicalPageRoot;
	page_directory_entry *pd;
	page_table_entry *pt;
	addr_t tbl;
	uint32 index;
	uint32 i;

	TRACE(("early_tmap: entry pa 0x%lx va 0x%lx\n", pa, va));

	// everything much simpler here because pa = va
	// thanks to transparent translation which hasn't been disabled yet

	index = VADDR_TO_PRENT(va);
	if (pr[index].type != DT_ROOT) {
		unsigned aindex = index & ~(NUM_DIRTBL_PER_PAGE-1); /* aligned */
		TRACE(("missing page root entry %d ai %d\n", index, aindex));
		tbl = get_free_page(args) * B_PAGE_SIZE;

		TRACE(("early_map: asked for free page for pgdir. 0x%lx\n", tbl));

		memset((void *)tbl, 0, B_PAGE_SIZE);
		// for each pgdir on the allocated page:
		for (i = 0; i < NUM_DIRTBL_PER_PAGE; i++) {
			put_pgdir_in_pgroot(&pr[aindex + i], tbl, attributes);
			//TRACE(("inserting tbl @ %p as %08x pr[%d] %08x\n", tbl, TA_TO_PREA(tbl), aindex + i, *(uint32 *)apr));

			//TRACE(("clearing table[%d]\n", i));
			pd = (page_directory_entry *)tbl;
			for (int32 j = 0; j < NUM_DIRENT_PER_TBL; j++)
				*(page_directory_entry_scalar *)(&pd[j]) = DFL_DIRENT_VAL;

			tbl += SIZ_DIRTBL;
		}
	}
	pd = (page_directory_entry *)PRE_TO_TA(pr[index]);

	index = VADDR_TO_PDENT(va);
	if (pd[index].type != DT_DIR) {
		unsigned aindex = index & ~(NUM_PAGETBL_PER_PAGE-1); /* aligned */
		TRACE(("missing page dir entry %d ai %d\n", index, aindex));
		tbl = get_free_page(args) * B_PAGE_SIZE;

		TRACE(("early_map: asked for free page for pgtable. 0x%lx\n", tbl));

		memset((void *)tbl, 0, B_PAGE_SIZE);
		// for each pgtable on the allocated page:
		for (i = 0; i < NUM_PAGETBL_PER_PAGE; i++) {
			put_pgtable_in_pgdir(&pd[aindex + i], tbl, attributes);

			//TRACE(("clearing table[%d]\n", i));
			pt = (page_table_entry *)tbl;
			for (int32 j = 0; j < NUM_PAGEENT_PER_TBL; j++)
				*(page_table_entry_scalar *)(&pt[j]) = DFL_PAGEENT_VAL;

			tbl += SIZ_PAGETBL;
		}
	}
	pt = (page_table_entry *)PDE_TO_TA(pd[index]);

	index = VADDR_TO_PTENT(va);
	put_page_table_entry_in_pgtable(&pt[index], pa, attributes,
		IS_KERNEL_ADDRESS(va));

	arch_cpu_invalidate_TLB_range(va, va);

	return B_OK;
}
bool
m68k_vm_translation_map_is_kernel_page_accessible(addr_t virtualAddress,