src/system/kernel/arch/m68k/paging/040/M68KVMTranslationMap040.cpp
1 /*
2 * Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
3 * Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
4 * Distributed under the terms of the MIT License.
6 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
7 * Distributed under the terms of the NewOS License.
8 */
11 #include "paging/040/M68KVMTranslationMap040.h"
13 #include <stdlib.h>
14 #include <string.h>
16 #include <int.h>
17 #include <thread.h>
18 #include <slab/Slab.h>
19 #include <smp.h>
20 #include <util/AutoLock.h>
21 #include <util/queue.h>
22 #include <vm/vm_page.h>
23 #include <vm/vm_priv.h>
24 #include <vm/VMAddressSpace.h>
25 #include <vm/VMCache.h>
27 #include "paging/040/M68KPagingMethod040.h"
28 #include "paging/040/M68KPagingStructures040.h"
29 #include "paging/m68k_physical_page_mapper.h"
32 #define TRACE_M68K_VM_TRANSLATION_MAP_040
33 #ifdef TRACE_M68K_VM_TRANSLATION_MAP_040
34 # define TRACE(x...) dprintf(x)
35 #else
36 # define TRACE(x...) ;
37 #endif
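// Layout note (a summary of what this code assumes for the 68040 with 4KB
// pages): the logical address splits into a 7-bit root index, a 7-bit
// directory (pointer table) index, a 6-bit page table index and a 12-bit
// offset. Root and directory tables hold 128 4-byte descriptors (512 bytes
// each), page tables hold 64 descriptors (256 bytes), so several tables are
// packed into every B_PAGE_SIZE page -- see NUM_DIRTBL_PER_PAGE and
// NUM_PAGETBL_PER_PAGE below.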
40 M68KVMTranslationMap040::M68KVMTranslationMap040()
42 fPagingStructures(NULL)
47 M68KVMTranslationMap040::~M68KVMTranslationMap040()
49 if (fPagingStructures == NULL)
50 return;
52 if (fPageMapper != NULL)
53 fPageMapper->Delete();
55 if (fPagingStructures->pgroot_virt != NULL) {
56 page_root_entry *pgroot_virt = fPagingStructures->pgroot_virt;
58 // cycle through and free all of the user space pgdirs & pgtables.
59 // Since the size of these tables doesn't match B_PAGE_SIZE, we allocate
60 // several of them per physical page; the modulo arithmetic below ensures
61 // they are either all in the tree or none of them is.
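// (Concretely, assuming the 512-byte 040 directory tables, eight of them
// share one 4KB page, so root entries i .. i+7 -- for i a multiple of
// NUM_DIRTBL_PER_PAGE -- point into the same physical page, which is why
// that page is only freed once the last entry of the group was handled.)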
62 for (uint32 i = VADDR_TO_PRENT(USER_BASE);
63 i <= VADDR_TO_PRENT(USER_BASE + (USER_SIZE - 1)); i++) {
64 addr_t pgdir_pn;
65 page_directory_entry *pgdir;
66 vm_page *dirpage;
68 if (PRE_TYPE(pgroot_virt[i]) == DT_INVALID)
69 continue;
70 if (PRE_TYPE(pgroot_virt[i]) != DT_ROOT) {
71 panic("rtdir[%ld]: buggy descriptor type", i);
72 return;
74 // XXX:suboptimal (done 8 times)
75 pgdir_pn = PRE_TO_PN(pgroot_virt[i]);
76 dirpage = vm_lookup_page(pgdir_pn);
77 pgdir = &(((page_directory_entry *)dirpage)[i%NUM_DIRTBL_PER_PAGE]);
79 for (uint32 j = 0; j < NUM_DIRENT_PER_TBL;
80 j+=NUM_PAGETBL_PER_PAGE) {
81 addr_t pgtbl_pn;
82 page_table_entry *pgtbl;
83 vm_page *page;
84 if (PDE_TYPE(pgdir[j]) == DT_INVALID)
85 continue;
86 if (PDE_TYPE(pgdir[j]) != DT_DIR) {
87 panic("pgroot[%ld][%ld]: buggy descriptor type", i, j);
88 return;
90 pgtbl_pn = PDE_TO_PN(pgdir[j]);
91 page = vm_lookup_page(pgtbl_pn);
92 pgtbl = (page_table_entry *)page;
94 if (!page) {
95 panic("destroy_tmap: didn't find pgtable page\n");
96 return;
98 DEBUG_PAGE_ACCESS_START(page);
99 vm_page_set_state(page, PAGE_STATE_FREE);
101 if (((i + 1) % NUM_DIRTBL_PER_PAGE) == 0) {
102 DEBUG_PAGE_ACCESS_END(dirpage);
103 vm_page_set_state(dirpage, PAGE_STATE_FREE);
109 #if 0
110 //X86
111 for (uint32 i = VADDR_TO_PDENT(USER_BASE);
112 i <= VADDR_TO_PDENT(USER_BASE + (USER_SIZE - 1)); i++) {
113 if ((fPagingStructures->pgdir_virt[i] & M68K_PDE_PRESENT) != 0) {
114 addr_t address = fPagingStructures->pgdir_virt[i]
115 & M68K_PDE_ADDRESS_MASK;
116 vm_page* page = vm_lookup_page(address / B_PAGE_SIZE);
117 if (!page)
118 panic("destroy_tmap: didn't find pgtable page\n");
119 DEBUG_PAGE_ACCESS_START(page);
120 vm_page_set_state(page, PAGE_STATE_FREE);
123 #endif
126 fPagingStructures->RemoveReference();
130 status_t
131 M68KVMTranslationMap040::Init(bool kernel)
133 TRACE("M68KVMTranslationMap040::Init()\n");
135 M68KVMTranslationMap::Init(kernel);
137 fPagingStructures = new(std::nothrow) M68KPagingStructures040;
138 if (fPagingStructures == NULL)
139 return B_NO_MEMORY;
141 M68KPagingMethod040* method = M68KPagingMethod040::Method();
143 if (!kernel) {
144 // user
145 // allocate a physical page mapper
146 status_t error = method->PhysicalPageMapper()
147 ->CreateTranslationMapPhysicalPageMapper(&fPageMapper);
148 if (error != B_OK)
149 return error;
151 // allocate the page root
152 page_root_entry* virtualPageRoot = (page_root_entry*)memalign(
153 SIZ_ROOTTBL, SIZ_ROOTTBL);
154 if (virtualPageRoot == NULL)
155 return B_NO_MEMORY;
157 // look up the page root's physical address
158 phys_addr_t physicalPageRoot;
159 vm_get_page_mapping(VMAddressSpace::KernelID(),
160 (addr_t)virtualPageRoot, &physicalPageRoot);
162 fPagingStructures->Init(virtualPageRoot, physicalPageRoot,
163 method->KernelVirtualPageRoot());
164 } else {
165 // kernel
166 // get the physical page mapper
167 fPageMapper = method->KernelPhysicalPageMapper();
169 // we already know the kernel pgroot mapping
170 fPagingStructures->Init(method->KernelVirtualPageRoot(),
171 method->KernelPhysicalPageRoot(), NULL);
174 return B_OK;
178 size_t
179 M68KVMTranslationMap040::MaxPagesNeededToMap(addr_t start, addr_t end) const
181 size_t need;
182 size_t pgdirs;
184 // If start == 0, the actual base address is not yet known to the caller and
185 // we shall assume the worst case.
186 if (start == 0) {
187 // offset the range so it has the worst possible alignment
188 #warning M68K: FIXME?
189 start = 1023 * B_PAGE_SIZE;
190 end += 1023 * B_PAGE_SIZE;
193 pgdirs = VADDR_TO_PRENT(end) + 1 - VADDR_TO_PRENT(start);
194 // how much for page directories
195 need = (pgdirs + NUM_DIRTBL_PER_PAGE - 1) / NUM_DIRTBL_PER_PAGE;
196 // and page tables themselves
197 need += ((pgdirs * NUM_DIRENT_PER_TBL) + NUM_PAGETBL_PER_PAGE - 1) / NUM_PAGETBL_PER_PAGE;
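// Worked example (assuming the usual 040 constants: 8 directory tables and
// 16 page tables per page, 128 entries per directory table): a range that
// spans two root entries needs ceil(2/8) = 1 page for directory tables plus
// at most ceil(2*128/16) = 16 pages for page tables, i.e. 17 pages; the
// single-pgdir case below refines this bound.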
199 // better rounding when only 1 pgdir
200 // XXX: do better for other cases
201 if (pgdirs == 1) {
202 need = 1;
203 need += (VADDR_TO_PDENT(end) + 1 - VADDR_TO_PDENT(start) + NUM_PAGETBL_PER_PAGE - 1) / NUM_PAGETBL_PER_PAGE;
206 return need;
210 status_t
211 M68KVMTranslationMap040::Map(addr_t va, phys_addr_t pa, uint32 attributes,
212 uint32 memoryType, vm_page_reservation* reservation)
214 TRACE("M68KVMTranslationMap040::Map: entry pa 0x%lx va 0x%lx\n", pa, va);
217 //dprintf("pgdir at 0x%x\n", pgdir);
218 //dprintf("index is %d\n", va / B_PAGE_SIZE / 1024);
219 //dprintf("final at 0x%x\n", &pgdir[va / B_PAGE_SIZE / 1024]);
220 //dprintf("value is 0x%x\n", *(int *)&pgdir[va / B_PAGE_SIZE / 1024]);
221 //dprintf("present bit is %d\n", pgdir[va / B_PAGE_SIZE / 1024].present);
222 //dprintf("addr is %d\n", pgdir[va / B_PAGE_SIZE / 1024].addr);
224 page_root_entry *pr = fPagingStructures->pgroot_virt;
225 page_directory_entry *pd;
226 page_table_entry *pt;
227 addr_t pd_pg, pt_pg;
228 uint32 rindex, dindex, pindex;
231 // check to see if a page directory exists for this range
232 rindex = VADDR_TO_PRENT(va);
233 if (PRE_TYPE(pr[rindex]) != DT_ROOT) {
234 phys_addr_t pgdir;
235 vm_page *page;
236 uint32 i;
238 // we need to allocate a pgdir group
239 page = vm_page_allocate_page(reservation,
240 PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);
242 DEBUG_PAGE_ACCESS_END(page);
244 pgdir = (phys_addr_t)page->physical_page_number * B_PAGE_SIZE;
246 TRACE("::Map: asked for free page for pgdir. 0x%lx\n", pgdir);
248 // for each pgdir on the allocated page:
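// (all NUM_DIRTBL_PER_PAGE directory tables sharing the new page are hooked
// into the root in one go, preserving the "all or none" grouping the
// destructor relies on when freeing these pages)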
249 for (i = 0; i < NUM_DIRTBL_PER_PAGE; i++) {
250 uint32 aindex = rindex & ~(NUM_DIRTBL_PER_PAGE-1); /* aligned */
251 page_root_entry *apr = &pr[aindex + i];
253 // put in the pgroot
254 M68KPagingMethod040::PutPageDirInPageRoot(apr, pgdir, attributes
255 | ((attributes & B_USER_PROTECTION) != 0
256 ? B_WRITE_AREA : B_KERNEL_WRITE_AREA));
258 // update any other page roots, if it maps kernel space
259 //XXX: suboptimal, should batch them
260 if ((aindex+i) >= FIRST_KERNEL_PGDIR_ENT && (aindex+i)
261 < (FIRST_KERNEL_PGDIR_ENT + NUM_KERNEL_PGDIR_ENTS))
262 M68KPagingStructures040::UpdateAllPageDirs((aindex+i),
263 pr[aindex+i]);
265 pgdir += SIZ_DIRTBL;
267 fMapCount++;
269 // now, fill in the pentry
270 //XXX: is this required?
271 Thread* thread = thread_get_current_thread();
272 ThreadCPUPinner pinner(thread);
274 pd = (page_directory_entry*)MapperGetPageTableAt(
275 PRE_TO_PA(pr[rindex]));
277 //pinner.Unlock();
279 // we want the table at rindex, not at rindex%(tbl/page)
280 //pd += (rindex % NUM_DIRTBL_PER_PAGE) * NUM_DIRENT_PER_TBL;
282 // check to see if a page table exists for this range
283 dindex = VADDR_TO_PDENT(va);
284 if (PDE_TYPE(pd[dindex]) != DT_DIR) {
285 phys_addr_t pgtable;
286 vm_page *page;
287 uint32 i;
289 // we need to allocate a pgtable group
290 page = vm_page_allocate_page(reservation,
291 PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);
293 DEBUG_PAGE_ACCESS_END(page);
295 pgtable = (phys_addr_t)page->physical_page_number * B_PAGE_SIZE;
297 TRACE("::Map: asked for free page for pgtable. 0x%lx\n", pgtable);
299 // for each pgtable on the allocated page:
300 for (i = 0; i < NUM_PAGETBL_PER_PAGE; i++) {
301 uint32 aindex = dindex & ~(NUM_PAGETBL_PER_PAGE-1); /* aligned */
302 page_directory_entry *apd = &pd[aindex + i];
304 // put in the pgdir
305 M68KPagingMethod040::PutPageTableInPageDir(apd, pgtable, attributes
306 | ((attributes & B_USER_PROTECTION) != 0
307 ? B_WRITE_AREA : B_KERNEL_WRITE_AREA));
309 // no need to update other page directories for kernel space;
310 // the root-level entries already point to us.
312 pgtable += SIZ_PAGETBL;
315 #warning M68K: really mean map_count++ ??
316 fMapCount++;
319 // now, fill in the pentry
320 //ThreadCPUPinner pinner(thread);
322 pt = (page_table_entry*)MapperGetPageTableAt(PDE_TO_PA(pd[dindex]));
323 // we want the table at dindex, not at dindex%(tbl/page)
324 //pt += (dindex % NUM_PAGETBL_PER_PAGE) * NUM_PAGEENT_PER_TBL;
326 pindex = VADDR_TO_PTENT(va);
328 ASSERT_PRINT(PTE_TYPE(pt[pindex]) == DT_INVALID,
329 "virtual address: %#" B_PRIxADDR ", existing pte: %#" B_PRIx32, va,
330 pt[pindex]);
332 M68KPagingMethod040::PutPageTableEntryInTable(&pt[pindex], pa, attributes,
333 memoryType, fIsKernelMap);
335 pinner.Unlock();
337 // Note: We don't need to invalidate the TLB for this address, as previously
338 // the entry was not present and the TLB doesn't cache those entries.
340 fMapCount++;
342 return B_OK;
346 status_t
347 M68KVMTranslationMap040::Unmap(addr_t start, addr_t end)
349 start = ROUNDDOWN(start, B_PAGE_SIZE);
350 if (start >= end)
351 return B_OK;
353 TRACE("M68KVMTranslationMap040::Unmap: asked to free pages 0x%lx to 0x%lx\n", start, end);
355 page_root_entry *pr = fPagingStructures->pgroot_virt;
356 page_directory_entry *pd;
357 page_table_entry *pt;
358 int index;
360 do {
361 index = VADDR_TO_PRENT(start);
362 if (PRE_TYPE(pr[index]) != DT_ROOT) {
363 // no pagedir here, move the start up to access the next page
364 // dir group
365 start = ROUNDUP(start + 1, kPageDirAlignment);
366 continue;
369 Thread* thread = thread_get_current_thread();
370 ThreadCPUPinner pinner(thread);
372 pd = (page_directory_entry*)MapperGetPageTableAt(
373 PRE_TO_PA(pr[index]));
374 // we want the table at rindex, not at rindex%(tbl/page)
375 //pd += (index % NUM_DIRTBL_PER_PAGE) * NUM_DIRENT_PER_TBL;
378 index = VADDR_TO_PDENT(start);
379 if (PDE_TYPE(pd[index]) != DT_DIR) {
380 // no page table here, move the start up to access the next page
381 // table group
382 start = ROUNDUP(start + 1, kPageTableAlignment);
383 continue;
386 pt = (page_table_entry*)MapperGetPageTableAt(
387 PDE_TO_PA(pd[index]));
388 // we want the table at rindex, not at rindex%(tbl/page)
389 //pt += (index % NUM_PAGETBL_PER_PAGE) * NUM_PAGEENT_PER_TBL;
391 for (index = VADDR_TO_PTENT(start);
392 (index < NUM_PAGEENT_PER_TBL) && (start < end);
393 index++, start += B_PAGE_SIZE) {
394 if (PTE_TYPE(pt[index]) != DT_PAGE
395 && PTE_TYPE(pt[index]) != DT_INDIRECT) {
396 // page mapping not valid
397 continue;
400 TRACE("::Unmap: removing page 0x%lx\n", start);
402 page_table_entry oldEntry
403 = M68KPagingMethod040::ClearPageTableEntry(&pt[index]);
404 fMapCount--;
406 if ((oldEntry & M68K_PTE_ACCESSED) != 0) {
407 // Note that we only need to invalidate the address if the
408 // accessed flag was set, since only then the entry could have
409 // been in any TLB.
410 InvalidatePage(start);
413 } while (start != 0 && start < end);
415 return B_OK;
419 /*! Caller must have locked the cache of the page to be unmapped.
420 This object shouldn't be locked.
422 status_t
423 M68KVMTranslationMap040::UnmapPage(VMArea* area, addr_t address,
424 bool updatePageQueue)
426 ASSERT(address % B_PAGE_SIZE == 0);
428 page_root_entry* pr = fPagingStructures->pgroot_virt;
430 TRACE("M68KVMTranslationMap040::UnmapPage(%#" B_PRIxADDR ")\n", address);
432 RecursiveLocker locker(fLock);
434 int index;
436 index = VADDR_TO_PRENT(address);
437 if (PRE_TYPE(pr[index]) != DT_ROOT)
438 return B_ENTRY_NOT_FOUND;
440 ThreadCPUPinner pinner(thread_get_current_thread());
442 page_directory_entry* pd = (page_directory_entry*)MapperGetPageTableAt(
443 pr[index] & M68K_PRE_ADDRESS_MASK);
445 index = VADDR_TO_PDENT(address);
446 if (PDE_TYPE(pd[index]) != DT_DIR)
447 return B_ENTRY_NOT_FOUND;
449 page_table_entry* pt = (page_table_entry*)MapperGetPageTableAt(
450 pd[index] & M68K_PDE_ADDRESS_MASK);
452 index = VADDR_TO_PTENT(address);
453 if (PTE_TYPE(pt[index]) == DT_INDIRECT) {
454 phys_addr_t indirectAddress = PIE_TO_TA(pt[index]);
455 pt = (page_table_entry*)MapperGetPageTableAt(
456 indirectAddress, true);
457 index = 0; // single descriptor
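// An indirect entry doesn't map the page itself: on the 040 it holds the
// (4-byte aligned) physical address of the real page descriptor, which we
// mapped above and now treat as a one-entry table, hence index 0.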
460 page_table_entry oldEntry = M68KPagingMethod040::ClearPageTableEntry(
461 &pt[index]);
463 pinner.Unlock();
465 if (PTE_TYPE(oldEntry) != DT_PAGE) {
466 // page mapping not valid
467 return B_ENTRY_NOT_FOUND;
470 fMapCount--;
472 if ((oldEntry & M68K_PTE_ACCESSED) != 0) {
473 // Note that we only need to invalidate the address if the
474 // accessed flag was set, since only then the entry could have been
475 // in any TLB.
476 InvalidatePage(address);
477 Flush();
479 // NOTE: Between clearing the page table entry and Flush() other
480 // processors (actually even this processor with another thread of the
481 // same team) could still access the page in question via their cached
482 // entry. We can obviously lose a modified flag in this case, with the
483 // effect that the page looks unmodified (and might thus be recycled),
484 // but is actually modified.
485 // In most cases this is harmless, but for vm_remove_all_page_mappings()
486 // this is actually a problem.
487 // Interestingly FreeBSD seems to ignore this problem as well
488 // (cf. pmap_remove_all()), unless I've missed something.
491 locker.Detach();
492 // PageUnmapped() will unlock for us
494 PageUnmapped(area, (oldEntry & M68K_PTE_ADDRESS_MASK) / B_PAGE_SIZE,
495 (oldEntry & M68K_PTE_ACCESSED) != 0, (oldEntry & M68K_PTE_DIRTY) != 0,
496 updatePageQueue);
498 return B_OK;
502 void
503 M68KVMTranslationMap040::UnmapPages(VMArea* area, addr_t base, size_t size,
504 bool updatePageQueue)
506 int index;
508 if (size == 0)
509 return;
511 addr_t start = base;
512 addr_t end = base + size - 1;
514 TRACE("M68KVMTranslationMap040::UnmapPages(%p, %#" B_PRIxADDR ", %#"
515 B_PRIxADDR ")\n", area, start, end);
517 page_root_entry* pr = fPagingStructures->pgroot_virt;
519 VMAreaMappings queue;
521 RecursiveLocker locker(fLock);
523 do {
524 index = VADDR_TO_PRENT(start);
525 if (PRE_TYPE(pr[index]) != DT_ROOT) {
526 // no page directory here, move the start up to access the next
527 // page directory group
528 start = ROUNDUP(start + 1, kPageDirAlignment);
529 continue;
532 Thread* thread = thread_get_current_thread();
533 ThreadCPUPinner pinner(thread);
535 page_directory_entry* pd = (page_directory_entry*)MapperGetPageTableAt(
536 pr[index] & M68K_PRE_ADDRESS_MASK);
538 index = VADDR_TO_PDENT(start);
539 if (PDE_TYPE(pd[index]) != DT_DIR) {
540 // no page table here, move the start up to access the next page
541 // table
542 start = ROUNDUP(start + 1, kPageTableAlignment);
543 continue;
546 page_table_entry* pt = (page_table_entry*)MapperGetPageTableAt(
547 pd[index] & M68K_PDE_ADDRESS_MASK);
549 for (index = VADDR_TO_PTENT(start); (index < NUM_PAGEENT_PER_TBL) && (start < end);
550 index++, start += B_PAGE_SIZE) {
551 page_table_entry *e = &pt[index];
552 // fetch indirect descriptor
553 //XXX:clear the indirect descriptor too??
554 if (PTE_TYPE(pt[index]) == DT_INDIRECT) {
555 phys_addr_t indirectAddress = PIE_TO_TA(pt[index]);
556 e = (page_table_entry*)MapperGetPageTableAt(
557 indirectAddress, true);
560 page_table_entry oldEntry
561 = M68KPagingMethod040::ClearPageTableEntry(e);
562 if (PTE_TYPE(oldEntry) != DT_PAGE)
563 continue;
565 fMapCount--;
567 if ((oldEntry & M68K_PTE_ACCESSED) != 0) {
568 // Note that we only need to invalidate the address if the
569 // accessed flag was set, since only then the entry could have
570 // been in any TLB.
571 InvalidatePage(start);
574 if (area->cache_type != CACHE_TYPE_DEVICE) {
575 // get the page
576 vm_page* page = vm_lookup_page(
577 (oldEntry & M68K_PTE_ADDRESS_MASK) / B_PAGE_SIZE);
578 ASSERT(page != NULL);
580 DEBUG_PAGE_ACCESS_START(page);
582 // transfer the accessed/dirty flags to the page
583 if ((oldEntry & M68K_PTE_ACCESSED) != 0)
584 page->accessed = true;
585 if ((oldEntry & M68K_PTE_DIRTY) != 0)
586 page->modified = true;
588 // remove the mapping object/decrement the wired_count of the
589 // page
590 if (area->wiring == B_NO_LOCK) {
591 vm_page_mapping* mapping = NULL;
592 vm_page_mappings::Iterator iterator
593 = page->mappings.GetIterator();
594 while ((mapping = iterator.Next()) != NULL) {
595 if (mapping->area == area)
596 break;
599 ASSERT(mapping != NULL);
601 area->mappings.Remove(mapping);
602 page->mappings.Remove(mapping);
603 queue.Add(mapping);
604 } else
605 page->DecrementWiredCount();
607 if (!page->IsMapped()) {
608 atomic_add(&gMappedPagesCount, -1);
610 if (updatePageQueue) {
611 if (page->Cache()->temporary)
612 vm_page_set_state(page, PAGE_STATE_INACTIVE);
613 else if (page->modified)
614 vm_page_set_state(page, PAGE_STATE_MODIFIED);
615 else
616 vm_page_set_state(page, PAGE_STATE_CACHED);
620 DEBUG_PAGE_ACCESS_END(page);
624 Flush();
625 // flush explicitly, since we directly use the lock
626 } while (start != 0 && start < end);
628 // TODO: As in UnmapPage() we can lose page dirty flags here. ATM it's not
629 // really critical here, as in all cases where this method is used, the unmapped
630 // area range is unmapped for good (resized/cut) and the pages will likely
631 // be freed.
633 locker.Unlock();
635 // free removed mappings
636 bool isKernelSpace = area->address_space == VMAddressSpace::Kernel();
637 uint32 freeFlags = CACHE_DONT_WAIT_FOR_MEMORY
638 | (isKernelSpace ? CACHE_DONT_LOCK_KERNEL_SPACE : 0);
639 while (vm_page_mapping* mapping = queue.RemoveHead())
640 object_cache_free(gPageMappingsObjectCache, mapping, freeFlags);
644 void
645 M68KVMTranslationMap040::UnmapArea(VMArea* area, bool deletingAddressSpace,
646 bool ignoreTopCachePageFlags)
648 if (area->cache_type == CACHE_TYPE_DEVICE || area->wiring != B_NO_LOCK) {
649 M68KVMTranslationMap040::UnmapPages(area, area->Base(), area->Size(),
650 true);
651 return;
654 bool unmapPages = !deletingAddressSpace || !ignoreTopCachePageFlags;
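// Clearing the hardware entries is only skipped when the address space is
// going away entirely *and* the top cache's page flags may be ignored; even
// then, pages belonging to other (lower) caches are still unmapped below so
// their accessed/dirty flags aren't lost.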
656 page_root_entry* pr = fPagingStructures->pgroot_virt;
658 RecursiveLocker locker(fLock);
660 VMAreaMappings mappings;
661 mappings.MoveFrom(&area->mappings);
663 for (VMAreaMappings::Iterator it = mappings.GetIterator();
664 vm_page_mapping* mapping = it.Next();) {
665 vm_page* page = mapping->page;
666 page->mappings.Remove(mapping);
668 VMCache* cache = page->Cache();
670 bool pageFullyUnmapped = false;
671 if (!page->IsMapped()) {
672 atomic_add(&gMappedPagesCount, -1);
673 pageFullyUnmapped = true;
676 if (unmapPages || cache != area->cache) {
677 addr_t address = area->Base()
678 + ((page->cache_offset * B_PAGE_SIZE) - area->cache_offset);
680 int index;
681 index = VADDR_TO_PRENT(address);
682 if (PRE_TYPE(pr[index]) != DT_ROOT) {
683 panic("page %p has mapping for area %p (%#" B_PRIxADDR "), but "
684 "has no page root entry", page, area, address);
685 continue;
688 ThreadCPUPinner pinner(thread_get_current_thread());
690 page_directory_entry* pd
691 = (page_directory_entry*)MapperGetPageTableAt(
692 pr[index] & M68K_PRE_ADDRESS_MASK);
694 index = VADDR_TO_PDENT(address);
695 if (PDE_TYPE(pd[index]) != DT_DIR) {
696 panic("page %p has mapping for area %p (%#" B_PRIxADDR "), but "
697 "has no page dir entry", page, area, address);
698 continue;
701 page_table_entry* pt
702 = (page_table_entry*)MapperGetPageTableAt(
703 pd[index] & M68K_PDE_ADDRESS_MASK);
705 //XXX:M68K: DT_INDIRECT here?
707 page_table_entry oldEntry
708 = M68KPagingMethod040::ClearPageTableEntry(
709 &pt[VADDR_TO_PTENT(address)]);
711 pinner.Unlock();
713 if (PTE_TYPE(oldEntry) != DT_PAGE) {
714 panic("page %p has mapping for area %p (%#" B_PRIxADDR "), but "
715 "has no page table entry", page, area, address);
716 continue;
719 // transfer the accessed/dirty flags to the page and invalidate
720 // the mapping, if necessary
721 if ((oldEntry & M68K_PTE_ACCESSED) != 0) {
722 page->accessed = true;
724 if (!deletingAddressSpace)
725 InvalidatePage(address);
728 if ((oldEntry & M68K_PTE_DIRTY) != 0)
729 page->modified = true;
731 if (pageFullyUnmapped) {
732 DEBUG_PAGE_ACCESS_START(page);
734 if (cache->temporary)
735 vm_page_set_state(page, PAGE_STATE_INACTIVE);
736 else if (page->modified)
737 vm_page_set_state(page, PAGE_STATE_MODIFIED);
738 else
739 vm_page_set_state(page, PAGE_STATE_CACHED);
741 DEBUG_PAGE_ACCESS_END(page);
745 fMapCount--;
748 Flush();
749 // flush explicitly, since we directly use the lock
751 locker.Unlock();
753 bool isKernelSpace = area->address_space == VMAddressSpace::Kernel();
754 uint32 freeFlags = CACHE_DONT_WAIT_FOR_MEMORY
755 | (isKernelSpace ? CACHE_DONT_LOCK_KERNEL_SPACE : 0);
756 while (vm_page_mapping* mapping = mappings.RemoveHead())
757 object_cache_free(gPageMappingsObjectCache, mapping, freeFlags);
761 status_t
762 M68KVMTranslationMap040::Query(addr_t va, phys_addr_t *_physical,
763 uint32 *_flags)
765 // default the flags to not present
766 *_flags = 0;
767 *_physical = 0;
768 TRACE("040::Query(0x%lx,)\n", va);
770 int index = VADDR_TO_PRENT(va);
771 page_root_entry *pr = fPagingStructures->pgroot_virt;
772 if (PRE_TYPE(pr[index]) != DT_ROOT) {
773 // no pagetable here
774 return B_OK;
777 Thread* thread = thread_get_current_thread();
778 ThreadCPUPinner pinner(thread);
780 page_directory_entry* pd = (page_directory_entry*)MapperGetPageTableAt(
781 pr[index] & M68K_PRE_ADDRESS_MASK);
783 index = VADDR_TO_PDENT(va);
784 if (PDE_TYPE(pd[index]) != DT_DIR) {
785 // no pagetable here
786 return B_OK;
789 page_table_entry* pt = (page_table_entry*)MapperGetPageTableAt(
790 pd[index] & M68K_PDE_ADDRESS_MASK);
792 index = VADDR_TO_PTENT(va);
793 if (PTE_TYPE(pt[index]) == DT_INDIRECT) {
794 pt = (page_table_entry*)MapperGetPageTableAt(
795 pt[index] & M68K_PIE_ADDRESS_MASK, true);
796 index = 0;
799 page_table_entry entry = pt[index];
801 *_physical = entry & M68K_PTE_ADDRESS_MASK;
803 // read in the page state flags
804 if ((entry & M68K_PTE_SUPERVISOR) == 0) {
805 *_flags |= ((entry & M68K_PTE_READONLY) == 0 ? B_WRITE_AREA : 0)
806 | B_READ_AREA;
809 *_flags |= ((entry & M68K_PTE_READONLY) == 0 ? B_KERNEL_WRITE_AREA : 0)
810 | B_KERNEL_READ_AREA
811 | ((entry & M68K_PTE_DIRTY) != 0 ? PAGE_MODIFIED : 0)
812 | ((entry & M68K_PTE_ACCESSED) != 0 ? PAGE_ACCESSED : 0)
813 | ((PTE_TYPE(entry) == DT_PAGE) ? PAGE_PRESENT : 0);
815 pinner.Unlock();
817 TRACE("query_tmap: returning pa 0x%lx for va 0x%lx\n", *_physical, va);
819 return B_OK;
823 status_t
824 M68KVMTranslationMap040::QueryInterrupt(addr_t va, phys_addr_t *_physical,
825 uint32 *_flags)
827 *_flags = 0;
828 *_physical = 0;
829 TRACE("040::QueryInterrupt(0x%lx,)\n", va);
831 int index = VADDR_TO_PRENT(va);
832 page_root_entry* pr = fPagingStructures->pgroot_virt;
833 if (PRE_TYPE(pr[index]) != DT_ROOT) {
834 // no pagetable here
835 return B_OK;
838 // map page table entry
839 phys_addr_t ppr = pr[index] & M68K_PRE_ADDRESS_MASK;
840 page_directory_entry* pd = (page_directory_entry*)((char *)
841 M68KPagingMethod040::Method()->PhysicalPageMapper()
842 ->InterruptGetPageTableAt(ppr & ~(B_PAGE_SIZE-1))
843 + (ppr % B_PAGE_SIZE));
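// The directory table is not page aligned (several tables share one
// physical page), so we map the page containing it through the
// interrupt-safe mapper and re-apply the sub-page offset by hand; the same
// is done for the page table (and a possible indirect descriptor) below.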
845 index = VADDR_TO_PDENT(va);
846 if (PDE_TYPE(pd[index]) != DT_DIR) {
847 // no pagetable here
848 return B_OK;
851 phys_addr_t ppd = pd[index] & M68K_PDE_ADDRESS_MASK;
852 page_table_entry* pt = (page_table_entry*)((char *)
853 M68KPagingMethod040::Method()->PhysicalPageMapper()
854 ->InterruptGetPageTableAt(ppd & ~(B_PAGE_SIZE-1))
855 + (ppd % B_PAGE_SIZE));
857 index = VADDR_TO_PTENT(va);
858 if (PTE_TYPE(pt[index]) == DT_INDIRECT) {
859 phys_addr_t ppt = pt[index] & M68K_PIE_ADDRESS_MASK;
860 pt = (page_table_entry*)((char *)
861 M68KPagingMethod040::Method()->PhysicalPageMapper()
862 ->InterruptGetPageTableAt(ppt & ~(B_PAGE_SIZE-1))
863 + (ppt % B_PAGE_SIZE));
864 index = 0;
867 page_table_entry entry = pt[index];
869 *_physical = entry & M68K_PTE_ADDRESS_MASK;
871 // read in the page state flags
872 if ((entry & M68K_PTE_SUPERVISOR) == 0) {
873 *_flags |= ((entry & M68K_PTE_READONLY) == 0 ? B_WRITE_AREA : 0)
874 | B_READ_AREA;
877 *_flags |= ((entry & M68K_PTE_READONLY) == 0 ? B_KERNEL_WRITE_AREA : 0)
878 | B_KERNEL_READ_AREA
879 | ((entry & M68K_PTE_DIRTY) != 0 ? PAGE_MODIFIED : 0)
880 | ((entry & M68K_PTE_ACCESSED) != 0 ? PAGE_ACCESSED : 0)
881 | ((PTE_TYPE(entry) == DT_PAGE) ? PAGE_PRESENT : 0);
883 return B_OK;
887 status_t
888 M68KVMTranslationMap040::Protect(addr_t start, addr_t end, uint32 attributes,
889 uint32 memoryType)
891 start = ROUNDDOWN(start, B_PAGE_SIZE);
892 if (start >= end)
893 return B_OK;
895 TRACE("protect_tmap: pages 0x%lx to 0x%lx, attributes %lx\n", start, end,
896 attributes);
898 return ENOSYS;
899 #if 0
900 // compute protection flags
901 uint32 newProtectionFlags = 0;
902 if ((attributes & B_USER_PROTECTION) != 0) {
903 newProtectionFlags = M68K_PTE_USER;
904 if ((attributes & B_WRITE_AREA) != 0)
905 newProtectionFlags |= M68K_PTE_WRITABLE;
906 } else if ((attributes & B_KERNEL_WRITE_AREA) != 0)
907 newProtectionFlags = M68K_PTE_WRITABLE;
909 page_directory_entry *pd = fPagingStructures->pgdir_virt;
911 do {
912 int index = VADDR_TO_PDENT(start);
913 if ((pd[index] & M68K_PDE_PRESENT) == 0) {
914 // no page table here, move the start up to access the next page
915 // table
916 start = ROUNDUP(start + 1, kPageTableAlignment);
917 continue;
920 struct thread* thread = thread_get_current_thread();
921 ThreadCPUPinner pinner(thread);
923 page_table_entry* pt = (page_table_entry*)MapperGetPageTableAt(
924 pd[index] & M68K_PDE_ADDRESS_MASK);
926 for (index = VADDR_TO_PTENT(start); index < 1024 && start < end;
927 index++, start += B_PAGE_SIZE) {
928 page_table_entry entry = pt[index];
929 if ((entry & M68K_PTE_PRESENT) == 0) {
930 // page mapping not valid
931 continue;
934 TRACE("protect_tmap: protect page 0x%lx\n", start);
936 // set the new protection flags -- we want to do that atomically,
937 // without changing the accessed or dirty flag
938 page_table_entry oldEntry;
939 while (true) {
940 oldEntry = M68KPagingMethod040::TestAndSetPageTableEntry(
941 &pt[index],
942 (entry & ~(M68K_PTE_PROTECTION_MASK
943 | M68K_PTE_MEMORY_TYPE_MASK))
944 | newProtectionFlags
945 | M68KPagingMethod040::MemoryTypeToPageTableEntryFlags(
946 memoryType),
947 entry);
948 if (oldEntry == entry)
949 break;
950 entry = oldEntry;
953 if ((oldEntry & M68K_PTE_ACCESSED) != 0) {
954 // Note that we only need to invalidate the address if the
955 // accessed flag was set, since only then the entry could have
956 // been in any TLB.
957 InvalidatePage(start);
960 } while (start != 0 && start < end);
961 return B_OK;
962 #endif
966 status_t
967 M68KVMTranslationMap040::ClearFlags(addr_t va, uint32 flags)
969 return ENOSYS;
970 #if 0
971 int index = VADDR_TO_PDENT(va);
972 page_directory_entry* pd = fPagingStructures->pgdir_virt;
973 if ((pd[index] & M68K_PDE_PRESENT) == 0) {
974 // no pagetable here
975 return B_OK;
978 uint32 flagsToClear = ((flags & PAGE_MODIFIED) ? M68K_PTE_DIRTY : 0)
979 | ((flags & PAGE_ACCESSED) ? M68K_PTE_ACCESSED : 0);
981 struct thread* thread = thread_get_current_thread();
982 ThreadCPUPinner pinner(thread);
984 page_table_entry* pt = (page_table_entry*)MapperGetPageTableAt(
985 pd[index] & M68K_PDE_ADDRESS_MASK);
986 index = VADDR_TO_PTENT(va);
988 // clear out the flags we've been requested to clear
989 page_table_entry oldEntry
990 = M68KPagingMethod040::ClearPageTableEntryFlags(&pt[index],
991 flagsToClear);
993 pinner.Unlock();
995 if ((oldEntry & flagsToClear) != 0)
996 InvalidatePage(va);
998 return B_OK;
999 #endif
1003 bool
1004 M68KVMTranslationMap040::ClearAccessedAndModified(VMArea* area, addr_t address,
1005 bool unmapIfUnaccessed, bool& _modified)
1007 ASSERT(address % B_PAGE_SIZE == 0);
1009 page_root_entry* pr = fPagingStructures->pgroot_virt;
1011 TRACE("M68KVMTranslationMap040::ClearAccessedAndModified(%#" B_PRIxADDR
1012 ")\n", address);
1014 #if 0
1015 RecursiveLocker locker(fLock);
1017 int index = VADDR_TO_PDENT(address);
1018 if ((pd[index] & M68K_PDE_PRESENT) == 0)
1019 return false;
1021 ThreadCPUPinner pinner(thread_get_current_thread());
1023 page_table_entry* pt = (page_table_entry*)MapperGetPageTableAt(
1024 pd[index] & M68K_PDE_ADDRESS_MASK);
1026 index = VADDR_TO_PTENT(address);
1028 // perform the deed
1029 page_table_entry oldEntry;
1031 if (unmapIfUnaccessed) {
1032 while (true) {
1033 oldEntry = pt[index];
1034 if ((oldEntry & M68K_PTE_PRESENT) == 0) {
1035 // page mapping not valid
1036 return false;
1039 if (oldEntry & M68K_PTE_ACCESSED) {
1040 // page was accessed -- just clear the flags
1041 oldEntry = M68KPagingMethod040::ClearPageTableEntryFlags(
1042 &pt[index], M68K_PTE_ACCESSED | M68K_PTE_DIRTY);
1043 break;
1046 // page hasn't been accessed -- unmap it
1047 if (M68KPagingMethod040::TestAndSetPageTableEntry(&pt[index], 0,
1048 oldEntry) == oldEntry) {
1049 break;
1052 // something changed -- check again
1054 } else {
1055 oldEntry = M68KPagingMethod040::ClearPageTableEntryFlags(&pt[index],
1056 M68K_PTE_ACCESSED | M68K_PTE_DIRTY);
1059 pinner.Unlock();
1061 _modified = (oldEntry & M68K_PTE_DIRTY) != 0;
1063 if ((oldEntry & M68K_PTE_ACCESSED) != 0) {
1064 // Note that we only need to invalidate the address if the
1065 // accessed flag was set, since only then the entry could have been
1066 // in any TLB.
1067 InvalidatePage(address);
1069 Flush();
1071 return true;
1074 if (!unmapIfUnaccessed)
1075 return false;
1077 // We have unmapped the address. Do the "high level" stuff.
1079 fMapCount--;
1081 locker.Detach();
1082 // UnaccessedPageUnmapped() will unlock for us
1084 UnaccessedPageUnmapped(area,
1085 (oldEntry & M68K_PTE_ADDRESS_MASK) / B_PAGE_SIZE);
1087 #endif
1088 return false;
1092 M68KPagingStructures*
1093 M68KVMTranslationMap040::PagingStructures() const
1095 return fPagingStructures;
1099 inline void *
1100 M68KVMTranslationMap040::MapperGetPageTableAt(phys_addr_t physicalAddress,
1101 bool indirect)
1103 // M68K fits several page tables in a single page...
1104 uint32 offset = physicalAddress % B_PAGE_SIZE;
1105 ASSERT((indirect && (offset % 4) == 0) || (offset % SIZ_ROOTTBL) == 0);
1106 physicalAddress &= ~(B_PAGE_SIZE-1);
1107 void *va = fPageMapper->GetPageTableAt(physicalAddress);
1108 return (void *)((addr_t)va + offset);