/*
 * Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */

#include "paging/040/M68KVMTranslationMap040.h"

#include <thread.h>

#include <slab/Slab.h>

#include <util/AutoLock.h>
#include <util/queue.h>
#include <vm/vm_page.h>
#include <vm/vm_priv.h>
#include <vm/VMAddressSpace.h>
#include <vm/VMCache.h>

#include "paging/040/M68KPagingMethod040.h"
#include "paging/040/M68KPagingStructures040.h"
#include "paging/m68k_physical_page_mapper.h"

#define TRACE_M68K_VM_TRANSLATION_MAP_040
#ifdef TRACE_M68K_VM_TRANSLATION_MAP_040
#	define TRACE(x...) dprintf(x)
#else
#	define TRACE(x...) ;
#endif


M68KVMTranslationMap040::M68KVMTranslationMap040()
	:
	fPagingStructures(NULL)
{
}


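/*!	Tears the translation map down: releases the per-map physical page mapper
	and frees all user space page directory/page table pages still referenced
	from the page root, before dropping the reference to the paging structures.
*/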
M68KVMTranslationMap040::~M68KVMTranslationMap040()
{
	if (fPagingStructures == NULL)
		return;

	if (fPageMapper != NULL)
		fPageMapper->Delete();

	if (fPagingStructures->pgroot_virt != NULL) {
		page_root_entry *pgroot_virt = fPagingStructures->pgroot_virt;

		// cycle through and free all of the user space pgdirs & pgtables;
		// since the size of the tables doesn't match B_PAGE_SIZE,
		// we alloc several at once, based on modulos,
		// we make sure they are either all in the tree or none.
		for (uint32 i = VADDR_TO_PRENT(USER_BASE);
				i <= VADDR_TO_PRENT(USER_BASE + (USER_SIZE - 1)); i++) {
			page_num_t pgdir_pn;
			page_directory_entry *pgdir;
			vm_page *dirpage;

			if (PRE_TYPE(pgroot_virt[i]) == DT_INVALID)
				continue;
			if (PRE_TYPE(pgroot_virt[i]) != DT_ROOT) {
				panic("rtdir[%ld]: buggy descriptor type", i);
				return;
			}
			// XXX: suboptimal (done 8 times)
			pgdir_pn = PRE_TO_PN(pgroot_virt[i]);
			dirpage = vm_lookup_page(pgdir_pn);
			pgdir = &(((page_directory_entry *)dirpage)[i % NUM_DIRTBL_PER_PAGE]);

			for (uint32 j = 0; j <= NUM_DIRENT_PER_TBL;
					j += NUM_PAGETBL_PER_PAGE) {
				page_num_t pgtbl_pn;
				page_table_entry *pgtbl;
				vm_page *page;

				if (PDE_TYPE(pgdir[j]) == DT_INVALID)
					continue;
				if (PDE_TYPE(pgdir[j]) != DT_DIR) {
					panic("pgroot[%ld][%ld]: buggy descriptor type", i, j);
					return;
				}
				pgtbl_pn = PDE_TO_PN(pgdir[j]);
				page = vm_lookup_page(pgtbl_pn);
				pgtbl = (page_table_entry *)page;

				if (!page) {
					panic("destroy_tmap: didn't find pgtable page\n");
					return;
				}
				DEBUG_PAGE_ACCESS_START(page);
				vm_page_set_state(page, PAGE_STATE_FREE);
			}

			if (((i + 1) % NUM_DIRTBL_PER_PAGE) == 0) {
				DEBUG_PAGE_ACCESS_END(dirpage);
				vm_page_set_state(dirpage, PAGE_STATE_FREE);
			}
		}

		for (uint32 i = VADDR_TO_PDENT(USER_BASE);
				i <= VADDR_TO_PDENT(USER_BASE + (USER_SIZE - 1)); i++) {
			if ((fPagingStructures->pgdir_virt[i] & M68K_PDE_PRESENT) != 0) {
				addr_t address = fPagingStructures->pgdir_virt[i]
					& M68K_PDE_ADDRESS_MASK;
				vm_page* page = vm_lookup_page(address / B_PAGE_SIZE);
				if (!page)
					panic("destroy_tmap: didn't find pgtable page\n");
				DEBUG_PAGE_ACCESS_START(page);
				vm_page_set_state(page, PAGE_STATE_FREE);
			}
		}
	}

	fPagingStructures->RemoveReference();
}


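/*!	Second-stage initialization. For a kernel map the already known kernel
	page root and the kernel physical page mapper are reused; for a user map
	a dedicated physical page mapper and a new SIZ_ROOTTBL-aligned page root
	are allocated, and the root's physical address is looked up through the
	kernel address space.
*/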
status_t
M68KVMTranslationMap040::Init(bool kernel)
{
	TRACE("M68KVMTranslationMap040::Init()\n");

	M68KVMTranslationMap::Init(kernel);

	fPagingStructures = new(std::nothrow) M68KPagingStructures040;
	if (fPagingStructures == NULL)
		return B_NO_MEMORY;

	M68KPagingMethod040* method = M68KPagingMethod040::Method();

	if (!kernel) {
		// user
		// allocate a physical page mapper
		status_t error = method->PhysicalPageMapper()
			->CreateTranslationMapPhysicalPageMapper(&fPageMapper);
		if (error != B_OK)
			return error;

		// allocate the page root
		page_root_entry* virtualPageRoot = (page_root_entry*)memalign(
			SIZ_ROOTTBL, SIZ_ROOTTBL);
		if (virtualPageRoot == NULL)
			return B_NO_MEMORY;

		// look up the page directory's physical address
		phys_addr_t physicalPageRoot;
		vm_get_page_mapping(VMAddressSpace::KernelID(),
			(addr_t)virtualPageRoot, &physicalPageRoot);

		fPagingStructures->Init(virtualPageRoot, physicalPageRoot,
			method->KernelVirtualPageRoot());
	} else {
		// kernel
		// get the physical page mapper
		fPageMapper = method->KernelPhysicalPageMapper();

		// we already know the kernel pgdir mapping
		fPagingStructures->Init(method->KernelVirtualPageRoot(),
			method->KernelPhysicalPageRoot(), NULL);
	}

	return B_OK;
}


size_t
M68KVMTranslationMap040::MaxPagesNeededToMap(addr_t start, addr_t end) const
{
	size_t need;
	size_t pgdirs;

	// If start == 0, the actual base address is not yet known to the caller and
	// we shall assume the worst case.
	if (start == 0) {
		// offset the range so it has the worst possible alignment
#warning M68K: FIXME?
		start = 1023 * B_PAGE_SIZE;
		end += 1023 * B_PAGE_SIZE;
	}

	pgdirs = VADDR_TO_PRENT(end) + 1 - VADDR_TO_PRENT(start);
	// how much for page directories
	need = (pgdirs + NUM_DIRTBL_PER_PAGE - 1) / NUM_DIRTBL_PER_PAGE;
	// and page tables themselves
	need = ((pgdirs * NUM_DIRENT_PER_TBL) + NUM_PAGETBL_PER_PAGE - 1)
		/ NUM_PAGETBL_PER_PAGE;

	// better rounding when only 1 pgdir
	// XXX: do better for other cases
	if (pgdirs == 1) {
		need = 1;
		need += (VADDR_TO_PDENT(end) + 1 - VADDR_TO_PDENT(start)
			+ NUM_PAGETBL_PER_PAGE - 1) / NUM_PAGETBL_PER_PAGE;
	}

	return need;
}


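/*!	Maps the virtual address \a va to the physical address \a pa. Missing
	page directory and page table groups are allocated from the caller's
	page reservation; since several 040 tables fit into one B_PAGE_SIZE page,
	a whole group of aligned root/directory entries is populated at once.
*/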
status_t
M68KVMTranslationMap040::Map(addr_t va, phys_addr_t pa, uint32 attributes,
	uint32 memoryType, vm_page_reservation* reservation)
{
	TRACE("M68KVMTranslationMap040::Map: entry pa 0x%lx va 0x%lx\n", pa, va);

/*
	dprintf("pgdir at 0x%x\n", pgdir);
	dprintf("index is %d\n", va / B_PAGE_SIZE / 1024);
	dprintf("final at 0x%x\n", &pgdir[va / B_PAGE_SIZE / 1024]);
	dprintf("value is 0x%x\n", *(int *)&pgdir[va / B_PAGE_SIZE / 1024]);
	dprintf("present bit is %d\n", pgdir[va / B_PAGE_SIZE / 1024].present);
	dprintf("addr is %d\n", pgdir[va / B_PAGE_SIZE / 1024].addr);
*/
	page_root_entry *pr = fPagingStructures->pgroot_virt;
	page_directory_entry *pd;
	page_table_entry *pt;
	uint32 rindex, dindex, pindex;

	// check to see if a page directory exists for this range
	rindex = VADDR_TO_PRENT(va);
	if (PRE_TYPE(pr[rindex]) != DT_ROOT) {
		phys_addr_t pgdir;
		vm_page *page;
		uint32 i;

		// we need to allocate a pgdir group
		page = vm_page_allocate_page(reservation,
			PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);

		DEBUG_PAGE_ACCESS_END(page);

		pgdir = (phys_addr_t)page->physical_page_number * B_PAGE_SIZE;

		TRACE("::Map: asked for free page for pgdir. 0x%lx\n", pgdir);

		// for each pgdir on the allocated page:
		for (i = 0; i < NUM_DIRTBL_PER_PAGE; i++) {
			uint32 aindex = rindex & ~(NUM_DIRTBL_PER_PAGE - 1); /* aligned */
			page_root_entry *apr = &pr[aindex + i];

			M68KPagingMethod040::PutPageDirInPageRoot(apr, pgdir, attributes
				| ((attributes & B_USER_PROTECTION) != 0
						? B_WRITE_AREA : B_KERNEL_WRITE_AREA));

			// update any other page roots, if it maps kernel space
			//XXX: suboptimal, should batch them
			if ((aindex + i) >= FIRST_KERNEL_PGDIR_ENT && (aindex + i)
					< (FIRST_KERNEL_PGDIR_ENT + NUM_KERNEL_PGDIR_ENTS))
				M68KPagingStructures040::UpdateAllPageDirs((aindex + i),
					pr[aindex + i]);

			pgdir += SIZ_DIRTBL;
		}
	}

	// now, fill in the pentry
	//XXX: is this required?
	Thread* thread = thread_get_current_thread();
	ThreadCPUPinner pinner(thread);

	pd = (page_directory_entry*)MapperGetPageTableAt(
		PRE_TO_PA(pr[rindex]));

	// we want the table at rindex, not at rindex%(tbl/page)
	//pd += (rindex % NUM_DIRTBL_PER_PAGE) * NUM_DIRENT_PER_TBL;

	// check to see if a page table exists for this range
	dindex = VADDR_TO_PDENT(va);
	if (PDE_TYPE(pd[dindex]) != DT_DIR) {
		phys_addr_t pgtable;
		vm_page *page;
		uint32 i;

		// we need to allocate a pgtable group
		page = vm_page_allocate_page(reservation,
			PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);

		DEBUG_PAGE_ACCESS_END(page);

		pgtable = (phys_addr_t)page->physical_page_number * B_PAGE_SIZE;

		TRACE("::Map: asked for free page for pgtable. 0x%lx\n", pgtable);

		// for each pgtable on the allocated page:
		for (i = 0; i < NUM_PAGETBL_PER_PAGE; i++) {
			uint32 aindex = dindex & ~(NUM_PAGETBL_PER_PAGE - 1); /* aligned */
			page_directory_entry *apd = &pd[aindex + i];

			M68KPagingMethod040::PutPageTableInPageDir(apd, pgtable, attributes
				| ((attributes & B_USER_PROTECTION) != 0
						? B_WRITE_AREA : B_KERNEL_WRITE_AREA));

			// no need to update other page directories for kernel space;
			// the root-level already points to us.

			pgtable += SIZ_PAGETBL;
		}

#warning M68K: really mean map_count++ ??
		fMapCount++;
	}

	// now, fill in the pentry
	//ThreadCPUPinner pinner(thread);

	pt = (page_table_entry*)MapperGetPageTableAt(PDE_TO_PA(pd[dindex]));
	// we want the table at rindex, not at rindex%(tbl/page)
	//pt += (dindex % NUM_PAGETBL_PER_PAGE) * NUM_PAGEENT_PER_TBL;

	pindex = VADDR_TO_PTENT(va);

	ASSERT_PRINT((PTE_TYPE(pt[pindex]) != DT_INVALID) == 0,
		"virtual address: %#" B_PRIxADDR ", existing pte: %#" B_PRIx32, va,
		pt[pindex]);

	M68KPagingMethod040::PutPageTableEntryInTable(&pt[pindex], pa, attributes,
		memoryType, fIsKernelMap);

	// Note: We don't need to invalidate the TLB for this address, as previously
	// the entry was not present and the TLB doesn't cache those entries.

	fMapCount++;

	return B_OK;
}


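/*!	Removes all mappings in the given virtual range. Only the page table
	entries are cleared; page directories and page tables remain allocated.
*/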
status_t
M68KVMTranslationMap040::Unmap(addr_t start, addr_t end)
{
	start = ROUNDDOWN(start, B_PAGE_SIZE);
	if (start >= end)
		return B_OK;

	TRACE("M68KVMTranslationMap040::Unmap: asked to free pages 0x%lx to 0x%lx\n",
		start, end);

	page_root_entry *pr = fPagingStructures->pgroot_virt;
	page_directory_entry *pd;
	page_table_entry *pt;
	int index;

	do {
		index = VADDR_TO_PRENT(start);
		if (PRE_TYPE(pr[index]) != DT_ROOT) {
			// no pagedir here, move the start up to access the next page
			// table
			start = ROUNDUP(start + 1, kPageDirAlignment);
			continue;
		}

		Thread* thread = thread_get_current_thread();
		ThreadCPUPinner pinner(thread);

		pd = (page_directory_entry*)MapperGetPageTableAt(
			PRE_TO_PA(pr[index]));
		// we want the table at rindex, not at rindex%(tbl/page)
		//pd += (index % NUM_DIRTBL_PER_PAGE) * NUM_DIRENT_PER_TBL;

		index = VADDR_TO_PDENT(start);
		if (PDE_TYPE(pd[index]) != DT_DIR) {
			// no pagedir here, move the start up to access the next page
			// table
			start = ROUNDUP(start + 1, kPageTableAlignment);
			continue;
		}

		pt = (page_table_entry*)MapperGetPageTableAt(
			PDE_TO_PA(pd[index]));
		// we want the table at rindex, not at rindex%(tbl/page)
		//pt += (index % NUM_PAGETBL_PER_PAGE) * NUM_PAGEENT_PER_TBL;

		for (index = VADDR_TO_PTENT(start);
				(index < NUM_PAGEENT_PER_TBL) && (start < end);
				index++, start += B_PAGE_SIZE) {
			if (PTE_TYPE(pt[index]) != DT_PAGE
				&& PTE_TYPE(pt[index]) != DT_INDIRECT) {
				// page mapping not valid
				continue;
			}

			TRACE("::Unmap: removing page 0x%lx\n", start);

			page_table_entry oldEntry
				= M68KPagingMethod040::ClearPageTableEntry(&pt[index]);
			fMapCount--;

			if ((oldEntry & M68K_PTE_ACCESSED) != 0) {
				// Note, that we only need to invalidate the address, if the
				// accessed flag was set, since only then the entry could have
				// been in any TLB.
				InvalidatePage(start);
			}
		}
	} while (start != 0 && start < end);

	return B_OK;
}


/*!	Caller must have locked the cache of the page to be unmapped.
	This object shouldn't be locked.
*/
status_t
M68KVMTranslationMap040::UnmapPage(VMArea* area, addr_t address,
	bool updatePageQueue)
{
	ASSERT(address % B_PAGE_SIZE == 0);

	page_root_entry* pr = fPagingStructures->pgroot_virt;

	TRACE("M68KVMTranslationMap040::UnmapPage(%#" B_PRIxADDR ")\n", address);

	RecursiveLocker locker(fLock);

	int index;

	index = VADDR_TO_PRENT(address);
	if (PRE_TYPE(pr[index]) != DT_ROOT)
		return B_ENTRY_NOT_FOUND;

	ThreadCPUPinner pinner(thread_get_current_thread());

	page_table_entry* pd = (page_table_entry*)MapperGetPageTableAt(
		pr[index] & M68K_PRE_ADDRESS_MASK);

	index = VADDR_TO_PDENT(address);
	if (PDE_TYPE(pd[index]) != DT_DIR)
		return B_ENTRY_NOT_FOUND;

	page_table_entry* pt = (page_table_entry*)MapperGetPageTableAt(
		pd[index] & M68K_PDE_ADDRESS_MASK);

	index = VADDR_TO_PTENT(address);
	if (PTE_TYPE(pt[index]) == DT_INDIRECT) {
		phys_addr_t indirectAddress = PIE_TO_TA(pt[index]);
		pt = (page_table_entry*)MapperGetPageTableAt(
			PIE_TO_TA(pt[index]), true);
		index = 0; // single descriptor
	}

	page_table_entry oldEntry = M68KPagingMethod040::ClearPageTableEntry(
		&pt[index]);

	if (PTE_TYPE(oldEntry) != DT_PAGE) {
		// page mapping not valid
		return B_ENTRY_NOT_FOUND;
	}

	fMapCount--;

	if ((oldEntry & M68K_PTE_ACCESSED) != 0) {
		// Note, that we only need to invalidate the address, if the
		// accessed flag was set, since only then the entry could have been
		// in any TLB.
		InvalidatePage(address);

		Flush();

		// NOTE: Between clearing the page table entry and Flush() other
		// processors (actually even this processor with another thread of the
		// same team) could still access the page in question via their cached
		// entry. We can obviously lose a modified flag in this case, with the
		// effect that the page looks unmodified (and might thus be recycled),
		// but is actually modified.
		// In most cases this is harmless, but for vm_remove_all_page_mappings()
		// this is actually a problem.
		// Interestingly FreeBSD seems to ignore this problem as well
		// (cf. pmap_remove_all()), unless I've missed something.
	}

	locker.Detach();
		// PageUnmapped() will unlock for us

	PageUnmapped(area, (oldEntry & M68K_PTE_ADDRESS_MASK) / B_PAGE_SIZE,
		(oldEntry & M68K_PTE_ACCESSED) != 0, (oldEntry & M68K_PTE_DIRTY) != 0,
		updatePageQueue);

	return B_OK;
}


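/*!	Unmaps a range of an area: clears the page table entries, transfers the
	accessed/dirty bits to the affected vm_pages and collects the mapping
	objects, which are freed after the map lock has been released.
*/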
void
M68KVMTranslationMap040::UnmapPages(VMArea* area, addr_t base, size_t size,
	bool updatePageQueue)
{
	if (size == 0)
		return;

	addr_t start = base;
	addr_t end = base + size - 1;

	TRACE("M68KVMTranslationMap040::UnmapPages(%p, %#" B_PRIxADDR ", %#"
		B_PRIxADDR ")\n", area, start, end);

	page_root_entry* pr = fPagingStructures->pgroot_virt;

	VMAreaMappings queue;

	RecursiveLocker locker(fLock);

	int index;

	do {
		index = VADDR_TO_PRENT(start);
		if (PRE_TYPE(pr[index]) != DT_ROOT) {
			// no page table here, move the start up to access the next page
			// table
			start = ROUNDUP(start + 1, kPageDirAlignment);
			continue;
		}

		Thread* thread = thread_get_current_thread();
		ThreadCPUPinner pinner(thread);

		page_directory_entry* pd = (page_directory_entry*)MapperGetPageTableAt(
			pr[index] & M68K_PRE_ADDRESS_MASK);

		index = VADDR_TO_PDENT(start);
		if (PDE_TYPE(pd[index]) != DT_DIR) {
			// no page table here, move the start up to access the next page
			// table
			start = ROUNDUP(start + 1, kPageTableAlignment);
			continue;
		}

		page_table_entry* pt = (page_table_entry*)MapperGetPageTableAt(
			pd[index] & M68K_PDE_ADDRESS_MASK);

		for (index = VADDR_TO_PTENT(start); (index < 1024) && (start < end);
				index++, start += B_PAGE_SIZE) {
			page_table_entry *e = &pt[index];
			// fetch indirect descriptor
			//XXX: clear the indirect descriptor too??
			if (PTE_TYPE(pt[index]) == DT_INDIRECT) {
				phys_addr_t indirectAddress = PIE_TO_TA(pt[index]);
				e = (page_table_entry*)MapperGetPageTableAt(
					PIE_TO_TA(pt[index]));
			}

			page_table_entry oldEntry
				= M68KPagingMethod040::ClearPageTableEntry(e);
			if (PTE_TYPE(oldEntry) != DT_PAGE)
				continue;

			fMapCount--;

			if ((oldEntry & M68K_PTE_ACCESSED) != 0) {
				// Note, that we only need to invalidate the address, if the
				// accessed flag was set, since only then the entry could have
				// been in any TLB.
				InvalidatePage(start);
			}

			if (area->cache_type != CACHE_TYPE_DEVICE) {
				vm_page* page = vm_lookup_page(
					(oldEntry & M68K_PTE_ADDRESS_MASK) / B_PAGE_SIZE);
				ASSERT(page != NULL);

				DEBUG_PAGE_ACCESS_START(page);

				// transfer the accessed/dirty flags to the page
				if ((oldEntry & M68K_PTE_ACCESSED) != 0)
					page->accessed = true;
				if ((oldEntry & M68K_PTE_DIRTY) != 0)
					page->modified = true;

				// remove the mapping object/decrement the wired_count of the
				// page
				if (area->wiring == B_NO_LOCK) {
					vm_page_mapping* mapping = NULL;
					vm_page_mappings::Iterator iterator
						= page->mappings.GetIterator();
					while ((mapping = iterator.Next()) != NULL) {
						if (mapping->area == area)
							break;
					}

					ASSERT(mapping != NULL);

					area->mappings.Remove(mapping);
					page->mappings.Remove(mapping);
					queue.Add(mapping);
				} else
					page->DecrementWiredCount();

				if (!page->IsMapped()) {
					atomic_add(&gMappedPagesCount, -1);

					if (updatePageQueue) {
						if (page->Cache()->temporary)
							vm_page_set_state(page, PAGE_STATE_INACTIVE);
						else if (page->modified)
							vm_page_set_state(page, PAGE_STATE_MODIFIED);
						else
							vm_page_set_state(page, PAGE_STATE_CACHED);
					}
				}

				DEBUG_PAGE_ACCESS_END(page);
			}
		}

		Flush();
			// flush explicitly, since we directly use the lock
	} while (start != 0 && start < end);

	// TODO: As in UnmapPage() we can lose page dirty flags here. ATM it's not
	// really critical here, as in all cases this method is used, the unmapped
	// area range is unmapped for good (resized/cut) and the pages will likely
	// be freed.

	locker.Unlock();

	// free removed mappings
	bool isKernelSpace = area->address_space == VMAddressSpace::Kernel();
	uint32 freeFlags = CACHE_DONT_WAIT_FOR_MEMORY
		| (isKernelSpace ? CACHE_DONT_LOCK_KERNEL_SPACE : 0);
	while (vm_page_mapping* mapping = queue.RemoveHead())
		object_cache_free(gPageMappingsObjectCache, mapping, freeFlags);
}


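/*!	Unmaps an entire area. Device and wired areas are simply forwarded to
	UnmapPages(); otherwise the area's mapping list is walked, accessed/dirty
	flags are transferred to the pages, and pages that are no longer mapped
	anywhere are moved to the appropriate page queue.
*/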
void
M68KVMTranslationMap040::UnmapArea(VMArea* area, bool deletingAddressSpace,
	bool ignoreTopCachePageFlags)
{
	if (area->cache_type == CACHE_TYPE_DEVICE || area->wiring != B_NO_LOCK) {
		M68KVMTranslationMap040::UnmapPages(area, area->Base(), area->Size(),
			true);
		return;
	}

	bool unmapPages = !deletingAddressSpace || !ignoreTopCachePageFlags;

	page_root_entry* pr = fPagingStructures->pgroot_virt;

	RecursiveLocker locker(fLock);

	VMAreaMappings mappings;
	mappings.MoveFrom(&area->mappings);

	for (VMAreaMappings::Iterator it = mappings.GetIterator();
			vm_page_mapping* mapping = it.Next();) {
		vm_page* page = mapping->page;
		page->mappings.Remove(mapping);

		VMCache* cache = page->Cache();

		bool pageFullyUnmapped = false;
		if (!page->IsMapped()) {
			atomic_add(&gMappedPagesCount, -1);
			pageFullyUnmapped = true;
		}

		if (unmapPages || cache != area->cache) {
			addr_t address = area->Base()
				+ ((page->cache_offset * B_PAGE_SIZE) - area->cache_offset);

			int index;
			index = VADDR_TO_PRENT(address);
			if (PRE_TYPE(pr[index]) != DT_ROOT) {
				panic("page %p has mapping for area %p (%#" B_PRIxADDR "), but "
					"has no page root entry", page, area, address);
				continue;
			}

			ThreadCPUPinner pinner(thread_get_current_thread());

			page_directory_entry* pd
				= (page_directory_entry*)MapperGetPageTableAt(
					pr[index] & M68K_PRE_ADDRESS_MASK);

			index = VADDR_TO_PDENT(address);
			if (PDE_TYPE(pd[index]) != DT_DIR) {
				panic("page %p has mapping for area %p (%#" B_PRIxADDR "), but "
					"has no page dir entry", page, area, address);
				continue;
			}

			page_table_entry* pt
				= (page_table_entry*)MapperGetPageTableAt(
					pd[index] & M68K_PDE_ADDRESS_MASK);

			//XXX:M68K: DT_INDIRECT here?

			page_table_entry oldEntry
				= M68KPagingMethod040::ClearPageTableEntry(
					&pt[VADDR_TO_PTENT(address)]);

			if (PTE_TYPE(oldEntry) != DT_PAGE) {
				panic("page %p has mapping for area %p (%#" B_PRIxADDR "), but "
					"has no page table entry", page, area, address);
				continue;
			}

			// transfer the accessed/dirty flags to the page and invalidate
			// the mapping, if necessary
			if ((oldEntry & M68K_PTE_ACCESSED) != 0) {
				page->accessed = true;

				if (!deletingAddressSpace)
					InvalidatePage(address);
			}

			if ((oldEntry & M68K_PTE_DIRTY) != 0)
				page->modified = true;

			if (pageFullyUnmapped) {
				DEBUG_PAGE_ACCESS_START(page);

				if (cache->temporary)
					vm_page_set_state(page, PAGE_STATE_INACTIVE);
				else if (page->modified)
					vm_page_set_state(page, PAGE_STATE_MODIFIED);
				else
					vm_page_set_state(page, PAGE_STATE_CACHED);

				DEBUG_PAGE_ACCESS_END(page);
			}
		}

		fMapCount--;
	}

	Flush();
		// flush explicitly, since we directly use the lock

	locker.Unlock();

	bool isKernelSpace = area->address_space == VMAddressSpace::Kernel();
	uint32 freeFlags = CACHE_DONT_WAIT_FOR_MEMORY
		| (isKernelSpace ? CACHE_DONT_LOCK_KERNEL_SPACE : 0);
	while (vm_page_mapping* mapping = mappings.RemoveHead())
		object_cache_free(gPageMappingsObjectCache, mapping, freeFlags);
}


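/*!	Looks up the physical address and the protection/state flags for a
	virtual address, following an indirect descriptor if necessary.
*/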
status_t
M68KVMTranslationMap040::Query(addr_t va, phys_addr_t *_physical,
	uint32 *_flags)
{
	// default the flags to not present
	*_flags = 0;
	*_physical = 0;
	TRACE("040::Query(0x%lx,)\n", va);

	int index = VADDR_TO_PRENT(va);
	page_root_entry *pr = fPagingStructures->pgroot_virt;
	if (PRE_TYPE(pr[index]) != DT_ROOT) {
		// no pagetable here
		return B_OK;
	}

	Thread* thread = thread_get_current_thread();
	ThreadCPUPinner pinner(thread);

	page_directory_entry* pd = (page_directory_entry*)MapperGetPageTableAt(
		pr[index] & M68K_PDE_ADDRESS_MASK);

	index = VADDR_TO_PDENT(va);
	if (PDE_TYPE(pd[index]) != DT_DIR) {
		// no pagetable here
		return B_OK;
	}

	page_table_entry* pt = (page_table_entry*)MapperGetPageTableAt(
		pd[index] & M68K_PDE_ADDRESS_MASK);

	index = VADDR_TO_PTENT(va);
	if (PTE_TYPE(pt[index]) == DT_INDIRECT) {
		pt = (page_table_entry*)MapperGetPageTableAt(
			pt[index] & M68K_PIE_ADDRESS_MASK);
		index = 0; // single descriptor
	}

	page_table_entry entry = pt[index];

	*_physical = entry & M68K_PTE_ADDRESS_MASK;

	// read in the page state flags
	if ((entry & M68K_PTE_SUPERVISOR) == 0) {
		*_flags |= ((entry & M68K_PTE_READONLY) == 0 ? B_WRITE_AREA : 0)
			| B_READ_AREA;
	}

	*_flags |= ((entry & M68K_PTE_READONLY) == 0 ? B_KERNEL_WRITE_AREA : 0)
		| B_KERNEL_READ_AREA
		| ((entry & M68K_PTE_DIRTY) != 0 ? PAGE_MODIFIED : 0)
		| ((entry & M68K_PTE_ACCESSED) != 0 ? PAGE_ACCESSED : 0)
		| ((PTE_TYPE(entry) == DT_PAGE) ? PAGE_PRESENT : 0);

	TRACE("query_tmap: returning pa 0x%lx for va 0x%lx\n", *_physical, va);

	return B_OK;
}


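/*!	Variant of Query() that may be used with interrupts disabled: the tables
	are accessed through the physical page mapper's InterruptGetPageTableAt()
	instead of the per-map page mapper.
*/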
status_t
M68KVMTranslationMap040::QueryInterrupt(addr_t va, phys_addr_t *_physical,
	uint32 *_flags)
{
	*_flags = 0;
	*_physical = 0;
	TRACE("040::QueryInterrupt(0x%lx,)\n", va);

	int index = VADDR_TO_PRENT(va);
	page_root_entry* pr = fPagingStructures->pgroot_virt;
	if (PRE_TYPE(pr[index]) != DT_ROOT) {
		// no pagetable here
		return B_OK;
	}

	// map page table entry
	phys_addr_t ppr = pr[index] & M68K_PRE_ADDRESS_MASK;
	page_directory_entry* pd = (page_directory_entry*)((char *)
		M68KPagingMethod040::Method()->PhysicalPageMapper()
			->InterruptGetPageTableAt(ppr & ~(B_PAGE_SIZE - 1))
		+ (ppr % B_PAGE_SIZE));

	index = VADDR_TO_PDENT(va);
	if (PDE_TYPE(pd[index]) != DT_DIR) {
		// no pagetable here
		return B_OK;
	}

	phys_addr_t ppd = pd[index] & M68K_PDE_ADDRESS_MASK;
	page_table_entry* pt = (page_table_entry*)((char *)
		M68KPagingMethod040::Method()->PhysicalPageMapper()
			->InterruptGetPageTableAt(ppd & ~(B_PAGE_SIZE - 1))
		+ (ppd % B_PAGE_SIZE));

	index = VADDR_TO_PTENT(va);
	if (PTE_TYPE(pt[index]) == DT_INDIRECT) {
		phys_addr_t ppt = pt[index] & M68K_PIE_ADDRESS_MASK;
		pt = (page_table_entry*)((char *)
			M68KPagingMethod040::Method()->PhysicalPageMapper()
				->InterruptGetPageTableAt(ppt & ~(B_PAGE_SIZE - 1))
			+ (ppt % B_PAGE_SIZE));
		index = 0; // single descriptor
	}

	page_table_entry entry = pt[index];

	*_physical = entry & M68K_PTE_ADDRESS_MASK;

	// read in the page state flags
	if ((entry & M68K_PTE_SUPERVISOR) == 0) {
		*_flags |= ((entry & M68K_PTE_READONLY) == 0 ? B_WRITE_AREA : 0)
			| B_READ_AREA;
	}

	*_flags |= ((entry & M68K_PTE_READONLY) == 0 ? B_KERNEL_WRITE_AREA : 0)
		| B_KERNEL_READ_AREA
		| ((entry & M68K_PTE_DIRTY) != 0 ? PAGE_MODIFIED : 0)
		| ((entry & M68K_PTE_ACCESSED) != 0 ? PAGE_ACCESSED : 0)
		| ((PTE_TYPE(entry) == DT_PAGE) ? PAGE_PRESENT : 0);

	return B_OK;
}


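/*!	Changes the protection of all mappings in the given range. The new
	protection and memory type bits are set atomically via
	TestAndSetPageTableEntry(), so concurrent accessed/dirty updates are not
	lost. Note that this method still walks the two-level pgdir_virt layout.
*/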
status_t
M68KVMTranslationMap040::Protect(addr_t start, addr_t end, uint32 attributes,
	uint32 memoryType)
{
	start = ROUNDDOWN(start, B_PAGE_SIZE);
	if (start >= end)
		return B_OK;

	TRACE("protect_tmap: pages 0x%lx to 0x%lx, attributes %lx\n", start, end,
		attributes);

	// compute protection flags
	uint32 newProtectionFlags = 0;
	if ((attributes & B_USER_PROTECTION) != 0) {
		newProtectionFlags = M68K_PTE_USER;
		if ((attributes & B_WRITE_AREA) != 0)
			newProtectionFlags |= M68K_PTE_WRITABLE;
	} else if ((attributes & B_KERNEL_WRITE_AREA) != 0)
		newProtectionFlags = M68K_PTE_WRITABLE;

	page_directory_entry *pd = fPagingStructures->pgdir_virt;

	do {
		int index = VADDR_TO_PDENT(start);
		if ((pd[index] & M68K_PDE_PRESENT) == 0) {
			// no page table here, move the start up to access the next page
			// table
			start = ROUNDUP(start + 1, kPageTableAlignment);
			continue;
		}

		Thread* thread = thread_get_current_thread();
		ThreadCPUPinner pinner(thread);

		page_table_entry* pt = (page_table_entry*)MapperGetPageTableAt(
			pd[index] & M68K_PDE_ADDRESS_MASK);

		for (index = VADDR_TO_PTENT(start); index < 1024 && start < end;
				index++, start += B_PAGE_SIZE) {
			page_table_entry entry = pt[index];
			if ((entry & M68K_PTE_PRESENT) == 0) {
				// page mapping not valid
				continue;
			}

			TRACE("protect_tmap: protect page 0x%lx\n", start);

			// set the new protection flags -- we want to do that atomically,
			// without changing the accessed or dirty flag
			page_table_entry oldEntry;
			while (true) {
				oldEntry = M68KPagingMethod040::TestAndSetPageTableEntry(
					&pt[index],
					(entry & ~(M68K_PTE_PROTECTION_MASK
							| M68K_PTE_MEMORY_TYPE_MASK))
						| newProtectionFlags
						| M68KPagingMethod040::MemoryTypeToPageTableEntryFlags(
							memoryType),
					entry);
				if (oldEntry == entry)
					break;
				entry = oldEntry;
			}

			if ((oldEntry & M68K_PTE_ACCESSED) != 0) {
				// Note, that we only need to invalidate the address, if the
				// accessed flag was set, since only then the entry could have
				// been in any TLB.
				InvalidatePage(start);
			}
		}
	} while (start != 0 && start < end);

	return B_OK;
}


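/*!	Clears the requested PAGE_MODIFIED/PAGE_ACCESSED flags of the mapping at
	\a va and invalidates the TLB entry if any of them was actually set.
*/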
status_t
M68KVMTranslationMap040::ClearFlags(addr_t va, uint32 flags)
{
	int index = VADDR_TO_PDENT(va);
	page_directory_entry* pd = fPagingStructures->pgdir_virt;
	if ((pd[index] & M68K_PDE_PRESENT) == 0) {
		// no pagetable here
		return B_OK;
	}

	uint32 flagsToClear = ((flags & PAGE_MODIFIED) ? M68K_PTE_DIRTY : 0)
		| ((flags & PAGE_ACCESSED) ? M68K_PTE_ACCESSED : 0);

	Thread* thread = thread_get_current_thread();
	ThreadCPUPinner pinner(thread);

	page_table_entry* pt = (page_table_entry*)MapperGetPageTableAt(
		pd[index] & M68K_PDE_ADDRESS_MASK);
	index = VADDR_TO_PTENT(va);

	// clear out the flags we've been requested to clear
	page_table_entry oldEntry
		= M68KPagingMethod040::ClearPageTableEntryFlags(&pt[index],
			flagsToClear);

	if ((oldEntry & flagsToClear) != 0)
		InvalidatePage(va);

	return B_OK;
}


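/*!	Clears the accessed and modified flags of the page at the given address;
	with \a unmapIfUnaccessed the mapping is removed altogether when the page
	has not been accessed. Returns whether the page was accessed.
*/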
bool
M68KVMTranslationMap040::ClearAccessedAndModified(VMArea* area, addr_t address,
	bool unmapIfUnaccessed, bool& _modified)
{
	ASSERT(address % B_PAGE_SIZE == 0);

	page_root_entry* pr = fPagingStructures->pgroot_virt;

	TRACE("M68KVMTranslationMap040::ClearAccessedAndModified(%#" B_PRIxADDR
		")\n", address);

	RecursiveLocker locker(fLock);

	page_directory_entry* pd = fPagingStructures->pgdir_virt;

	int index = VADDR_TO_PDENT(address);
	if ((pd[index] & M68K_PDE_PRESENT) == 0)
		return false;

	ThreadCPUPinner pinner(thread_get_current_thread());

	page_table_entry* pt = (page_table_entry*)MapperGetPageTableAt(
		pd[index] & M68K_PDE_ADDRESS_MASK);

	index = VADDR_TO_PTENT(address);

	page_table_entry oldEntry;

	if (unmapIfUnaccessed) {
		while (true) {
			oldEntry = pt[index];
			if ((oldEntry & M68K_PTE_PRESENT) == 0) {
				// page mapping not valid
				return false;
			}

			if (oldEntry & M68K_PTE_ACCESSED) {
				// page was accessed -- just clear the flags
				oldEntry = M68KPagingMethod040::ClearPageTableEntryFlags(
					&pt[index], M68K_PTE_ACCESSED | M68K_PTE_DIRTY);
				break;
			}

			// page hasn't been accessed -- unmap it
			if (M68KPagingMethod040::TestAndSetPageTableEntry(&pt[index], 0,
					oldEntry) == oldEntry) {
				break;
			}

			// something changed -- check again
		}
	} else {
		oldEntry = M68KPagingMethod040::ClearPageTableEntryFlags(&pt[index],
			M68K_PTE_ACCESSED | M68K_PTE_DIRTY);
	}

	_modified = (oldEntry & M68K_PTE_DIRTY) != 0;

	if ((oldEntry & M68K_PTE_ACCESSED) != 0) {
		// Note, that we only need to invalidate the address, if the
		// accessed flag was set, since only then the entry could have been
		// in any TLB.
		InvalidatePage(address);

		Flush();

		return true;
	}

	if (!unmapIfUnaccessed)
		return false;

	// We have unmapped the address. Do the "high level" stuff.

	fMapCount--;

	locker.Detach();
		// UnaccessedPageUnmapped() will unlock for us

	UnaccessedPageUnmapped(area,
		(oldEntry & M68K_PTE_ADDRESS_MASK) / B_PAGE_SIZE);

	return false;
}


M68KPagingStructures*
M68KVMTranslationMap040::PagingStructures() const
{
	return fPagingStructures;
}


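/*!	Returns a virtual pointer to the (sub-page sized) table at the given
	physical address. Since several 040 tables share one physical page, the
	offset within the page is preserved; for indirect descriptors only
	4-byte alignment is required.
*/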
void*
M68KVMTranslationMap040::MapperGetPageTableAt(phys_addr_t physicalAddress,
	bool indirect)
{
	// M68K fits several page tables in a single page...
	uint32 offset = physicalAddress % B_PAGE_SIZE;
	ASSERT((indirect && (offset % 4) == 0) || (offset % SIZ_ROOTTBL) == 0);
	physicalAddress &= ~(B_PAGE_SIZE - 1);
	void *va = fPageMapper->GetPageTableAt(physicalAddress);
	return (void *)((addr_t)va + offset);
}