/*
 * Copyright 2010-2012, François Revol, revol@free.fr.
 * Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */
12 #include "paging/040/M68KPagingMethod040.h"
17 #include <AutoDeleter.h>
19 #include <arch_system_info.h>
20 #include <boot/kernel_args.h>
24 #include <vm/VMAddressSpace.h>
26 #include "paging/040/M68KPagingStructures040.h"
27 #include "paging/040/M68KVMTranslationMap040.h"
28 #include "paging/m68k_physical_page_mapper.h"
29 #include "paging/m68k_physical_page_mapper_large_memory.h"
#define TRACE_M68K_PAGING_METHOD_32_BIT
#ifdef TRACE_M68K_PAGING_METHOD_32_BIT
#	define TRACE(x...) dprintf(x)
#else
#	define TRACE(x...) ;
#endif
/* Slots per pool for the physical page mapper.
 * Since m68k page tables are smaller than 1 page, but we allocate them
 * at page granularity anyway, just go for this.
 */
#define SLOTS_PER_POOL 1024
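
// Note: assuming the usual 4KB m68k page size, one pool covers
// SLOTS_PER_POOL * B_PAGE_SIZE = 4MB of virtual address space, and its
// 1024 4-byte page table entries fill exactly the one page allocated for
// the table (spanning several 64-entry 040 page tables).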

using M68KLargePhysicalPageMapper::PhysicalPageSlot;


//XXX: make it a class member
//static page_table_entry sQueryDesc __attribute__ (( aligned (4) ));

//static addr_t sIOSpaceBase;

//XXX: stuff it in the class
static inline void
init_page_root_entry(page_root_entry *entry)
{
	*entry = DFL_ROOTENT_VAL;
}

static inline void
update_page_root_entry(page_root_entry *entry, page_root_entry *with)
{
	// update page root entry atomically
	*entry = *with;
}

static inline void
init_page_directory_entry(page_directory_entry *entry)
{
	*entry = DFL_DIRENT_VAL;
}

static inline void
update_page_directory_entry(page_directory_entry *entry,
	page_directory_entry *with)
{
	// update page directory entry atomically
	*entry = *with;
}

static inline void
init_page_table_entry(page_table_entry *entry)
{
	*entry = DFL_PAGEENT_VAL;
}

static inline void
update_page_table_entry(page_table_entry *entry, page_table_entry *with)
{
	// update page table entry atomically
	// XXX: is it ?? (long desc?)
	*entry = *with;
}

static inline void
init_page_indirect_entry(page_indirect_entry *entry)
{
#warning M68K: is it correct?
	*entry = DFL_PAGEENT_VAL;
}

static inline void
update_page_indirect_entry(page_indirect_entry *entry,
	page_indirect_entry *with)
{
	// update page indirect entry atomically
	// XXX: is it ?? (long desc?)
	*entry = *with;
}
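
// A note on the "atomically" comments above: these short-format descriptors
// are single 32-bit words, and an aligned 32-bit store is atomic on m68k,
// which is all that is needed here. The XXX remarks flag that this would
// no longer hold for long-format (8-byte) descriptors, which take two
// stores.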

// #pragma mark - M68KPagingMethod040::PhysicalPageSlotPool


struct M68KPagingMethod040::PhysicalPageSlotPool
	: M68KLargePhysicalPageMapper::PhysicalPageSlotPool {
public:
	virtual						~PhysicalPageSlotPool();

			status_t			InitInitial(kernel_args* args);
			status_t			InitInitialPostArea(kernel_args* args);

			void				Init(area_id dataArea, void* data,
									area_id virtualArea, addr_t virtualBase);

	virtual	status_t			AllocatePool(
									M68KLargePhysicalPageMapper
										::PhysicalPageSlotPool*& _pool);
	virtual	void				Map(phys_addr_t physicalAddress,
									addr_t virtualAddress);

public:
	static	PhysicalPageSlotPool sInitialPhysicalPagePool;

private:
			area_id				fDataArea;
			area_id				fVirtualArea;
			addr_t				fVirtualBase;
			page_table_entry*	fPageTable;
};


M68KPagingMethod040::PhysicalPageSlotPool
	M68KPagingMethod040::PhysicalPageSlotPool::sInitialPhysicalPagePool;
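
// The initial pool is a static object: it is constructed with placement new
// in M68KPagingMethod040::Init(), before the kernel heap and area system
// are available (hence also the -1 area_ids passed in InitInitial() below).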

M68KPagingMethod040::PhysicalPageSlotPool::~PhysicalPageSlotPool()
{
}

status_t
M68KPagingMethod040::PhysicalPageSlotPool::InitInitial(kernel_args* args)
{
	// allocate a virtual address range for the pages to be mapped into
	addr_t virtualBase = vm_allocate_early(args, SLOTS_PER_POOL * B_PAGE_SIZE,
		0, 0, kPageTableAlignment);
	if (virtualBase == 0) {
		panic("LargeMemoryPhysicalPageMapper::Init(): Failed to reserve "
			"physical page pool space in virtual address space!");
		return B_ERROR;
	}

	// allocate memory for the page table and data
	size_t areaSize = B_PAGE_SIZE + sizeof(PhysicalPageSlot[SLOTS_PER_POOL]);
	page_table_entry* pageTable = (page_table_entry*)vm_allocate_early(args,
		areaSize, ~0L, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0);

	// prepare the page table
	_EarlyPreparePageTables(pageTable, virtualBase,
		SLOTS_PER_POOL * B_PAGE_SIZE);

	// init the pool structure and add the initial pool
	Init(-1, pageTable, -1, (addr_t)virtualBase);

	return B_OK;
}

status_t
M68KPagingMethod040::PhysicalPageSlotPool::InitInitialPostArea(
	kernel_args* args)
{
#warning M68K:WRITEME
	// create an area for the (already allocated) data
	size_t areaSize = B_PAGE_SIZE + sizeof(PhysicalPageSlot[SLOTS_PER_POOL]);
	void* temp = fPageTable;
	area_id area = create_area("physical page pool", &temp,
		B_EXACT_ADDRESS, areaSize, B_ALREADY_WIRED,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
	if (area < B_OK) {
		panic("LargeMemoryPhysicalPageMapper::InitPostArea(): Failed to "
			"create area for physical page pool.");
		return area;
	}
	fDataArea = area;

	// create an area for the virtual address space
	temp = (void*)fVirtualBase;
	area = vm_create_null_area(VMAddressSpace::KernelID(),
		"physical page pool space", &temp, B_EXACT_ADDRESS,
		SLOTS_PER_POOL * B_PAGE_SIZE, 0);
	if (area < B_OK) {
		panic("LargeMemoryPhysicalPageMapper::InitPostArea(): Failed to "
			"create area for physical page pool space.");
		return area;
	}
	fVirtualArea = area;

	return B_OK;
}

void
M68KPagingMethod040::PhysicalPageSlotPool::Init(area_id dataArea, void* data,
	area_id virtualArea, addr_t virtualBase)
{
	fDataArea = dataArea;
	fVirtualArea = virtualArea;
	fVirtualBase = virtualBase;
	fPageTable = (page_table_entry*)data;

	// init slot list
	fSlots = (PhysicalPageSlot*)(fPageTable + SLOTS_PER_POOL);
	addr_t slotAddress = virtualBase;
	for (int32 i = 0; i < SLOTS_PER_POOL; i++, slotAddress += B_PAGE_SIZE) {
		PhysicalPageSlot* slot = &fSlots[i];
		slot->next = slot + 1;
		slot->pool = this;
		slot->address = slotAddress;
	}

	fSlots[SLOTS_PER_POOL - 1].next = NULL;
		// terminate list
}
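
// After Init() the slots form a singly linked free list; the large-memory
// physical page mapper hands them out one at a time and points each at a
// physical page via Map() below.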

void
M68KPagingMethod040::PhysicalPageSlotPool::Map(phys_addr_t physicalAddress,
	addr_t virtualAddress)
{
	page_table_entry& pte = fPageTable[
		(virtualAddress - fVirtualBase) / B_PAGE_SIZE];
	pte = TA_TO_PTEA(physicalAddress) | DT_PAGE
		| M68K_PTE_SUPERVISOR | M68K_PTE_GLOBAL;

	arch_cpu_invalidate_TLB_range(virtualAddress, virtualAddress);
}

status_t
M68KPagingMethod040::PhysicalPageSlotPool::AllocatePool(
	M68KLargePhysicalPageMapper::PhysicalPageSlotPool*& _pool)
{
	// create the pool structure
	PhysicalPageSlotPool* pool = new(std::nothrow) PhysicalPageSlotPool;
	if (pool == NULL)
		return B_NO_MEMORY;
	ObjectDeleter<PhysicalPageSlotPool> poolDeleter(pool);

	// create an area that can contain the page table and the slot
	// structures
	size_t areaSize = B_PAGE_SIZE + sizeof(PhysicalPageSlot[SLOTS_PER_POOL]);
	void* data;
	virtual_address_restrictions virtualRestrictions = {};
	virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS;
	physical_address_restrictions physicalRestrictions = {};
	area_id dataArea = create_area_etc(B_SYSTEM_TEAM, "physical page pool",
		PAGE_ALIGN(areaSize), B_FULL_LOCK,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, CREATE_AREA_DONT_WAIT, 0,
		&virtualRestrictions, &physicalRestrictions, &data);
	if (dataArea < 0)
		return dataArea;

	// create the null area for the virtual address space
	void* virtualBase;
	area_id virtualArea = vm_create_null_area(
		VMAddressSpace::KernelID(), "physical page pool space",
		&virtualBase, B_ANY_KERNEL_BLOCK_ADDRESS, SLOTS_PER_POOL * B_PAGE_SIZE,
		CREATE_AREA_PRIORITY_VIP);
	if (virtualArea < 0) {
		delete_area(dataArea);
		return virtualArea;
	}

	// prepare the page table
	memset(data, 0, B_PAGE_SIZE);

	// get the page table's physical address
	phys_addr_t physicalTable;
	M68KVMTranslationMap040* map = static_cast<M68KVMTranslationMap040*>(
		VMAddressSpace::Kernel()->TranslationMap());
	uint32 dummyFlags;
	cpu_status state = disable_interrupts();
	map->QueryInterrupt((addr_t)data, &physicalTable, &dummyFlags);
	restore_interrupts(state);
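
	// Note: with 4KB pages the pool's 1024 entries span several 64-entry
	// 040 page tables, yet only a single table is hooked into the page
	// directory below; that is what the FIXME refers to.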
#warning M68K:FIXME: insert *all* page tables!
	// put the page table into the page directory
	int32 index = (addr_t)virtualBase / (B_PAGE_SIZE * SLOTS_PER_POOL);
	page_directory_entry* entry
		= &map->PagingStructures040()->pgdir_virt[index];
	PutPageTableInPageDir(entry, physicalTable,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
	M68KPagingStructures040::UpdateAllPageDirs(index, *entry);

	// init the pool structure
	pool->Init(dataArea, data, virtualArea, (addr_t)virtualBase);
	poolDeleter.Detach();

	_pool = pool;
	return B_OK;
}

// #pragma mark - M68KPagingMethod040


M68KPagingMethod040::M68KPagingMethod040()
	:
	//fPageHole(NULL),
	//fPageHolePageDir(NULL),
	fKernelPhysicalPageRoot(0),
	fKernelVirtualPageRoot(NULL),
	fPhysicalPageMapper(NULL),
	fKernelPhysicalPageMapper(NULL)
{
}


M68KPagingMethod040::~M68KPagingMethod040()
{
}

status_t
M68KPagingMethod040::Init(kernel_args* args,
	VMPhysicalPageMapper** _physicalPageMapper)
{
	TRACE("M68KPagingMethod040::Init(): entry\n");

#if 0 //XXX:We might actually need this trick to support Milan
	// page hole set up in stage2
	fPageHole = (page_table_entry*)args->arch_args.page_hole;
	// calculate where the pgdir would be
	fPageHolePageDir = (page_directory_entry*)
		(((addr_t)args->arch_args.page_hole)
			+ (B_PAGE_SIZE * 1024 - B_PAGE_SIZE));
	// clear out the bottom 2 GB, unmap everything
	memset(fPageHolePageDir + FIRST_USER_PGDIR_ENT, 0,
		sizeof(page_directory_entry) * NUM_USER_PGDIR_ENTS);
#endif

	fKernelPhysicalPageRoot = (uint32)args->arch_args.phys_pgroot;
	fKernelVirtualPageRoot = (page_root_entry *)args->arch_args.vir_pgroot;

#ifdef TRACE_M68K_PAGING_METHOD_32_BIT
	//TRACE("page hole: %p, page dir: %p\n", fPageHole, fPageHolePageDir);
	TRACE("page root: %p (physical: %#" B_PRIx32 ")\n",
		fKernelVirtualPageRoot, fKernelPhysicalPageRoot);
#endif

	//sQueryDesc.type = DT_INVALID;

	M68KPagingStructures040::StaticInit();

	// create the initial pool for the physical page mapper
	PhysicalPageSlotPool* pool
		= new(&PhysicalPageSlotPool::sInitialPhysicalPagePool)
			PhysicalPageSlotPool;
	status_t error = pool->InitInitial(args);
	if (error != B_OK) {
		panic("M68KPagingMethod040::Init(): Failed to create initial pool "
			"for physical page mapper!");
		return error;
	}

	// create physical page mapper
	large_memory_physical_page_ops_init(args, pool, fPhysicalPageMapper,
		fKernelPhysicalPageMapper);
		// TODO: Select the best page mapper!

	TRACE("M68KPagingMethod040::Init(): done\n");

	*_physicalPageMapper = fPhysicalPageMapper;
	return B_OK;
}

status_t
M68KPagingMethod040::InitPostArea(kernel_args* args)
{
	TRACE("M68KPagingMethod040::InitPostArea(): entry\n");
	// now that the vm is initialized, create an area that represents
	// the page hole
	void *temp;
	status_t error;
	area_id area;

#if 0
	// unmap the page hole hack we were using before
	fKernelVirtualPageDirectory[1023] = 0;
	fPageHolePageDir = NULL;
#endif

	temp = (void*)fKernelVirtualPageRoot;
	area = create_area("kernel_pgdir", &temp, B_EXACT_ADDRESS, B_PAGE_SIZE,
		B_ALREADY_WIRED, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
	if (area < B_OK)
		return area;

	error = PhysicalPageSlotPool::sInitialPhysicalPagePool
		.InitInitialPostArea(args);
	if (error != B_OK)
		return error;
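
	// Note: the "kernel_pgdir" create_area() above maps nothing new;
	// B_EXACT_ADDRESS plus B_ALREADY_WIRED merely registers the already
	// mapped page root with the VM so the range is properly accounted for.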

	// this area is used for query_tmap_interrupt()
	// TODO: Note, this only works as long as all pages belong to the same
	//	page table, which is not yet enforced (or even tested)!
	// Note we don't support SMP which makes things simpler.
#if 0 //XXX: Do we need this anymore?
	area = vm_create_null_area(VMAddressSpace::KernelID(),
		"interrupt query pages", (void **)&queryPage, B_ANY_ADDRESS,
		B_PAGE_SIZE, 0);
	// insert the indirect descriptor in the tree so we can map the page
	// we want from it.
#endif

	TRACE("M68KPagingMethod040::InitPostArea(): done\n");

	return B_OK;
}

status_t
M68KPagingMethod040::CreateTranslationMap(bool kernel, VMTranslationMap** _map)
{
	M68KVMTranslationMap040* map;

	map = new(std::nothrow) M68KVMTranslationMap040;
	if (map == NULL)
		return B_NO_MEMORY;

	status_t error = map->Init(kernel);
	if (error != B_OK) {
		delete map;
		return error;
	}

	*_map = map;
	return B_OK;
}

status_t
M68KPagingMethod040::MapEarly(kernel_args* args, addr_t virtualAddress,
	phys_addr_t physicalAddress, uint8 attributes,
	phys_addr_t (*get_free_page)(kernel_args*))
{
	// XXX horrible back door to map a page quickly regardless of translation
	// map object, etc. used only during VM setup.
	// uses a 'page hole' set up in the stage 2 bootloader. The page hole is
	// created by pointing one of the pgdir entries back at itself, effectively
	// mapping the contents of all of the 4MB of pagetables into a 4 MB region.
	// It's only used here, and is later unmapped.
	addr_t va = virtualAddress;
	phys_addr_t pa = physicalAddress;
	page_root_entry *pr = (page_root_entry *)fKernelPhysicalPageRoot;
	page_directory_entry *pd;
	page_table_entry *pt;
	addr_t tbl;
	uint32 index;
	uint32 i;

	TRACE("040::MapEarly: entry pa 0x%lx va 0x%lx\n", pa, va);

	// everything much simpler here because pa = va
	// thanks to transparent translation which hasn't been disabled yet

	index = VADDR_TO_PRENT(va);
	if (PRE_TYPE(pr[index]) != DT_ROOT) {
		unsigned aindex = index & ~(NUM_DIRTBL_PER_PAGE - 1); /* aligned */
		TRACE("missing page root entry %d ai %d\n", index, aindex);
		tbl = get_free_page(args) * B_PAGE_SIZE;
		TRACE("040::MapEarly: asked for free page for pgdir. 0x%lx\n", tbl);
		memset((void *)tbl, 0, B_PAGE_SIZE);
		// for each pgdir on the allocated page:
		for (i = 0; i < NUM_DIRTBL_PER_PAGE; i++) {
			PutPageDirInPageRoot(&pr[aindex + i], tbl, attributes);
			//TRACE("inserting tbl @ %p as %08x pr[%d] %08x\n", tbl, TA_TO_PREA(tbl), aindex + i, *(uint32 *)apr);
			//TRACE("clearing table[%d]\n", i);
			pd = (page_directory_entry *)tbl;
			for (int32 j = 0; j < NUM_DIRENT_PER_TBL; j++)
				pd[j] = DFL_DIRENT_VAL;
			tbl += SIZ_DIRTBL;
		}
	}
	pd = (page_directory_entry *)PRE_TO_TA(pr[index]);

	index = VADDR_TO_PDENT(va);
	if (PDE_TYPE(pd[index]) != DT_DIR) {
		unsigned aindex = index & ~(NUM_PAGETBL_PER_PAGE - 1); /* aligned */
		TRACE("missing page dir entry %d ai %d\n", index, aindex);
		tbl = get_free_page(args) * B_PAGE_SIZE;
		TRACE("early_map: asked for free page for pgtable. 0x%lx\n", tbl);
		memset((void *)tbl, 0, B_PAGE_SIZE);
		// for each pgtable on the allocated page:
		for (i = 0; i < NUM_PAGETBL_PER_PAGE; i++) {
			PutPageTableInPageDir(&pd[aindex + i], tbl, attributes);
			//TRACE("clearing table[%d]\n", i);
			pt = (page_table_entry *)tbl;
			for (int32 j = 0; j < NUM_PAGEENT_PER_TBL; j++)
				pt[j] = DFL_PAGEENT_VAL;
			tbl += SIZ_PAGETBL;
		}
	}
	pt = (page_table_entry *)PDE_TO_TA(pd[index]);

	index = VADDR_TO_PTENT(va);
	// now, fill in the pentry
	PutPageTableEntryInTable(&pt[index],
		physicalAddress, attributes, 0, IS_KERNEL_ADDRESS(virtualAddress));

	arch_cpu_invalidate_TLB_range(va, va);

	return B_OK;
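
	// For reference: with 4KB pages the 040 MMU splits a virtual address
	// into 7 bits of root index, 7 bits of directory index, 6 bits of page
	// table index and 12 bits of page offset; that is what the
	// VADDR_TO_PRENT/PDENT/PTENT macros above extract.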
#if 0	// leftover x86-style page-hole path, disabled
	// check to see if a page table exists for this range
	int index = VADDR_TO_PDENT(virtualAddress);
	if ((fPageHolePageDir[index] & M68K_PDE_PRESENT) == 0) {
		phys_addr_t pgtable;
		page_directory_entry *e;
		// we need to allocate a pgtable
		pgtable = get_free_page(args);
		// pgtable is in pages, convert to physical address
		pgtable *= B_PAGE_SIZE;

		TRACE("M68KPagingMethod040::MapEarly(): asked for free page for "
			"pgtable. %#" B_PRIxPHYSADDR "\n", pgtable);

		// put it in the pgdir
		e = &fPageHolePageDir[index];
		PutPageTableInPageDir(e, pgtable, attributes);

		// zero it out in its new mapping
		memset((unsigned int*)((addr_t)fPageHole
			+ (virtualAddress / B_PAGE_SIZE / 1024) * B_PAGE_SIZE),
			0, B_PAGE_SIZE);
	}

	ASSERT_PRINT(
		(fPageHole[virtualAddress / B_PAGE_SIZE] & M68K_PTE_PRESENT) == 0,
		"virtual address: %#" B_PRIxADDR ", pde: %#" B_PRIx32
		", existing pte: %#" B_PRIx32, virtualAddress, fPageHolePageDir[index],
		fPageHole[virtualAddress / B_PAGE_SIZE]);
#endif
}

bool
M68KPagingMethod040::IsKernelPageAccessible(addr_t virtualAddress,
	uint32 protection)
{
#warning M68K: WRITEME
	return false;
}

void
M68KPagingMethod040::SetPageRoot(uint32 pageRoot)
{
#warning M68K:TODO:override this for 060
	uint32 rp;
	rp = pageRoot & ~((1 << 9) - 1);
		// the root table is 512-byte aligned (128 4-byte descriptors)

void
M68KPagingMethod040::PutPageDirInPageRoot(page_root_entry* entry,
	phys_addr_t pgdirPhysical, uint32 attributes)
{
	*entry = TA_TO_PREA(pgdirPhysical)
		| DT_DIR;
			// it's a page directory entry

	// ToDo: we ignore the attributes of the page table - for compatibility
	// with BeOS we allow having user accessible areas in the kernel address
	// space. This is currently being used by some drivers, mainly for the
	// frame buffer. Our current real time data implementation makes use of
	// this possibility, too.
	// We might want to get rid of this possibility one day, especially if
	// we intend to port it to a platform that does not support this.
}

void
M68KPagingMethod040::PutPageTableInPageDir(page_directory_entry* entry,
	phys_addr_t pgtablePhysical, uint32 attributes)
{
	*entry = TA_TO_PDEA(pgtablePhysical)
		| DT_DIR;
			// it's a page directory entry
}

void
M68KPagingMethod040::PutPageTableEntryInTable(page_table_entry* entry,
	phys_addr_t physicalAddress, uint32 attributes, uint32 memoryType,
	bool globalPage)
{
	page_table_entry page = TA_TO_PTEA(physicalAddress)
		| DT_PAGE
#ifdef PAGE_HAS_GLOBAL_BIT
		| (globalPage ? M68K_PTE_GLOBAL : 0)
#endif
		| MemoryTypeToPageTableEntryFlags(memoryType);

	// if the page is user accessible, it's automatically
	// accessible in kernel space, too (but with the same
	// protection)
	if ((attributes & B_USER_PROTECTION) == 0) {
		page |= M68K_PTE_SUPERVISOR;
		if ((attributes & B_KERNEL_WRITE_AREA) == 0)
			page |= M68K_PTE_READONLY;
	} else if ((attributes & B_WRITE_AREA) == 0)
		page |= M68K_PTE_READONLY;

	// put it in the page table
	*(volatile page_table_entry*)entry = page;
}
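
// Resulting PTE protection from the function above, for reference:
//   kernel-only, read-only   -> M68K_PTE_SUPERVISOR | M68K_PTE_READONLY
//   kernel-only, read/write  -> M68K_PTE_SUPERVISOR
//   user, read-only          -> M68K_PTE_READONLY
//   user, read/write         -> (no extra protection bits)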

void
M68KPagingMethod040::_EarlyPreparePageTables(page_table_entry* pageTables,
	addr_t address, size_t size)
{
	memset(pageTables, 0, B_PAGE_SIZE *
		(size / (B_PAGE_SIZE * NUM_PAGEENT_PER_TBL * NUM_PAGETBL_PER_PAGE)));

	// put the array of pgtables directly into the kernel pagedir
	// these will be wired and kept mapped into virtual space to be easy to
	// get to.
	// note the bootloader allocates all page directories for us
	// as a contiguous block.
	// we also still have transparent translation enabled, va==pa.
	{
		size_t index;
		addr_t virtualTable = (addr_t)pageTables;
		page_root_entry *pr
			= M68KPagingMethod040::Method()->fKernelVirtualPageRoot;
		page_directory_entry *pd;
		page_directory_entry *e;

		for (size_t i = 0; i < (size / (B_PAGE_SIZE * NUM_PAGEENT_PER_TBL));
				i++, virtualTable += SIZ_PAGETBL) {
			// early_query handles non-page-aligned addresses
			phys_addr_t physicalTable = 0;
			_EarlyQuery(virtualTable, &physicalTable);
			index = VADDR_TO_PRENT(address) + i / NUM_DIRENT_PER_TBL;
			pd = (page_directory_entry *)PRE_TO_TA(pr[index]);
			e = &pd[(VADDR_TO_PDENT(address) + i) % NUM_DIRENT_PER_TBL];
			PutPageTableInPageDir(e, physicalTable,
				B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
		}
	}
}
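
// For a 4MB pool the loop above runs 16 times (assuming 4KB pages and
// 64-entry page tables: 4MB / 256KB covered per table), hooking each of
// the pool's page tables into the kernel paging structures.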

//! TODO: currently assumes this translation map is active
status_t
M68KPagingMethod040::_EarlyQuery(addr_t virtualAddress,
	phys_addr_t *_physicalAddress)
{
	M68KPagingMethod040* method = M68KPagingMethod040::Method();

	page_root_entry *pr = method->fKernelVirtualPageRoot;
	page_directory_entry *pd;
	page_indirect_entry *pi;
	page_table_entry *pt;
	addr_t pa;
	int32 index;
	status_t err = B_ERROR;	// no pagetable here
	TRACE("%s(%p,)\n", __FUNCTION__, virtualAddress);

	// this is used before the vm is fully up, it uses the
	// transparent translation of the first 256MB
	// as set up by the bootloader.

	index = VADDR_TO_PRENT(virtualAddress);
	TRACE("%s: pr[%d].type %d\n", __FUNCTION__, index, PRE_TYPE(pr[index]));
	if (pr && PRE_TYPE(pr[index]) == DT_ROOT) {
		pa = PRE_TO_TA(pr[index]);
		// pa == va when in TT
		// and no need to fiddle with cache
		pd = (page_directory_entry *)pa;

		index = VADDR_TO_PDENT(virtualAddress);
		TRACE("%s: pd[%d].type %d\n", __FUNCTION__, index,
			pd ? (PDE_TYPE(pd[index])) : -1);
		if (pd && PDE_TYPE(pd[index]) == DT_DIR) {
			pa = PDE_TO_TA(pd[index]);
			pt = (page_table_entry *)pa;

			index = VADDR_TO_PTENT(virtualAddress);
			TRACE("%s: pt[%d].type %d\n", __FUNCTION__, index,
				pt ? (PTE_TYPE(pt[index])) : -1);
			if (pt && PTE_TYPE(pt[index]) == DT_INDIRECT) {
				pi = (page_indirect_entry *)pt;
				pa = PIE_TO_TA(pi[index]);
				pt = (page_table_entry *)pa;
				index = 0; // single descriptor
			}

			if (pt && PIE_TYPE(pt[index]) == DT_PAGE) {
				*_physicalAddress = PTE_TO_PA(pt[index]);
				// we should only be passed page va, but just in case.
				*_physicalAddress += virtualAddress % B_PAGE_SIZE;
				err = B_OK;
			}
		}
	}

	return err;
#if 0	// leftover x86-style page-hole path, disabled
	int index = VADDR_TO_PDENT(virtualAddress);
	if ((method->PageHolePageDir()[index] & M68K_PDE_PRESENT) == 0) {
		// no pagetable here
		return B_ERROR;
	}

	page_table_entry* entry = method->PageHole()
		+ virtualAddress / B_PAGE_SIZE;
	if ((*entry & M68K_PTE_PRESENT) == 0) {
		// page mapping not valid
		return B_ERROR;
	}

	*_physicalAddress = *entry & M68K_PTE_ADDRESS_MASK;
	return B_OK;
#endif
}