/*
 * Copyright 2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2003-2007, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */
/*	(bonefish) Some explanatory words on how address translation is implemented
	for the 32 bit PPC architecture.

	I use the address type nomenclature as used in the PPC architecture
	specs, i.e.
	- effective address: An address as used by program instructions, i.e.
	  that's what elsewhere (e.g. in the VM implementation) is called a
	  virtual address.
	- virtual address: An intermediate address computed from the effective
	  address via the segment registers.
	- physical address: An address referring to physical storage.

	The hardware translates an effective address to a physical address using
	either of two mechanisms: 1) Block Address Translation (BAT) or
	2) segment + page translation. The first mechanism does this directly
	using two sets (for data/instructions) of special purpose registers.
	The latter mechanism is of more relevance here, though:

	effective address (32 bit):	[ 0 ESID 3 | 4  PIX 19 | 20 Byte 31 ]
	                                  |           |            |
	                       (segment registers)    |            |
	                                  |           |            |
	virtual address (52 bit):	[ 0 VSID 23 | 24 PIX 39 | 40 Byte 51 ]
	                        	[ 0      VPN      39 | 40 Byte 51 ]
	                                        |                  |
	                                  (page table)             |
	                                        |                  |
	physical address (32 bit):	[ 0      PPN      19 | 20 Byte 31 ]

	ESID: Effective Segment ID
	VSID: Virtual Segment ID
	PIX:  Page Index
	VPN:  Virtual Page Number
	PPN:  Physical Page Number

	Unlike on x86 we can't switch the context to another team simply by
	setting a register to another page directory, since we only have one
	page table containing both kernel and user address mappings. Instead we
	map the effective address space of the kernel and *all* teams
	non-intersectingly into the virtual address space (which fortunately is
	20 bits wider), and use the segment registers to select the section of
	the virtual address space for the current team. Half of the 16 segment
	registers (8 - 15) map the kernel addresses, so they remain unchanged.

	The range of the virtual address space a team's effective address space
	is mapped to is defined by its PPCVMTranslationMap::fVSIDBase,
	which is the first of the 8 successive VSID values used for the team.

	Which fVSIDBase values are already taken is defined by the set bits in
	the bitmap sVSIDBaseBitmap. (A small illustrative sketch of the address
	computation follows this comment block.)

	TODO:
	* If we want to continue to use the OF services, we would need to add
	  its address mappings to the kernel space. Unfortunately some stuff
	  (especially RAM) is mapped in an address range outside the kernel
	  address space. We probably need to map those into each team's address
	  space as kernel read/write areas.
	* The current locking scheme is insufficient. The page table is a resource
	  shared by all teams. We need to synchronize access to it. Probably via a
	  spinlock.
*/
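
/*	The following is a small, illustrative sketch (kept out of the build via
	"#if 0") of the address computation described above. The helper names are
	hypothetical and not part of the kernel; only the bit layout and the
	"fVSIDBase + ESID" scheme are taken from the explanation in the comment
	above. */
#if 0
#include <stdint.h>

static inline uint32_t
example_vsid(uint32_t vsidBase, uint32_t effectiveAddress)
{
	// The four ESID bits (0-3) of the effective address select one of the
	// team's eight successive VSIDs, starting at its fVSIDBase. Kernel
	// segments (8-15) keep fixed VSIDs and are not covered by this helper.
	uint32_t esid = effectiveAddress >> 28;
	return vsidBase + esid;
}

static inline uint64_t
example_virtual_address(uint32_t vsid, uint32_t effectiveAddress)
{
	// virtual address = VSID (24 bits) : page index (16 bits)
	// : byte offset (12 bits) -- 52 bits in total
	uint64_t pageIndex = (effectiveAddress >> 12) & 0xffff;
	uint64_t byteOffset = effectiveAddress & 0xfff;
	return ((uint64_t)vsid << 28) | (pageIndex << 12) | byteOffset;
}

// E.g. effective address 0x30001234 in a team with fVSIDBase 0x100:
// ESID = 3, VSID = 0x103, virtual address = 0x1030001234.
#endif
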
#include <arch/vm_translation_map.h>

#include <KernelExport.h>

//#include <arch_mmu.h>
#include <boot/kernel_args.h>
#include <kernel.h>
#include <slab/Slab.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_priv.h>
#include <vm/VMAddressSpace.h>
#include <vm/VMCache.h>

#include <util/AutoLock.h>

#include "generic_vm_physical_page_mapper.h"
//#include "generic_vm_physical_page_ops.h"
//#include "GenericVMPhysicalPageMapper.h"

#include "paging/PPCVMTranslationMap.h"
#include "paging/classic/PPCPagingMethodClassic.h"
//#include "paging/460/PPCPagingMethod460.h"

#define TRACE_VM_TMAP
#ifdef TRACE_VM_TMAP
#	define TRACE(x...) dprintf(x)
#else
#	define TRACE(x...) ;
#endif

static union {
	uint64 align;
	//char amcc460[sizeof(PPCPagingMethod460)];
	char classic[sizeof(PPCPagingMethodClassic)];
} sPagingMethodBuffer;

struct PPCVMTranslationMap : VMTranslationMap {
								PPCVMTranslationMap();
	virtual						~PPCVMTranslationMap();

			status_t			Init(bool kernel);

	inline	int					VSIDBase() const	{ return fVSIDBase; }

			page_table_entry*	LookupPageTableEntry(addr_t virtualAddress);
			bool				RemovePageTableEntry(addr_t virtualAddress);

	virtual	void				Unlock();

	virtual	addr_t				MappedSize() const;
	virtual	size_t				MaxPagesNeededToMap(addr_t start,
									addr_t end) const;

	virtual	status_t			Map(addr_t virtualAddress,
									phys_addr_t physicalAddress,
									uint32 attributes, uint32 memoryType,
									vm_page_reservation* reservation);
	virtual	status_t			Unmap(addr_t start, addr_t end);

	virtual	status_t			UnmapPage(VMArea* area, addr_t address,
									bool updatePageQueue);

	virtual	status_t			Query(addr_t virtualAddress,
									phys_addr_t* _physicalAddress,
									uint32* _flags);
	virtual	status_t			QueryInterrupt(addr_t virtualAddress,
									phys_addr_t* _physicalAddress,
									uint32* _flags);

	virtual	status_t			Protect(addr_t base, addr_t top,
									uint32 attributes, uint32 memoryType);
	virtual	status_t			ClearFlags(addr_t virtualAddress,
									uint32 flags);

	virtual	bool				ClearAccessedAndModified(
									VMArea* area, addr_t address,
									bool unmapIfUnaccessed,
									bool& _modified);

	virtual	void				Flush();

protected:
			int					fVSIDBase;
};

void
ppc_translation_map_change_asid(VMTranslationMap *map)
{
	static_cast<PPCVMTranslationMap*>(map)->ChangeASID();
}

#if 0	// XXX: Not needed anymore?

addr_t
PPCVMTranslationMap::MappedSize() const
{
	return fMapCount;
}

static status_t
get_physical_page_tmap(phys_addr_t physicalAddress, addr_t *_virtualAddress,
	void **handle)
{
	return generic_get_physical_page(physicalAddress, _virtualAddress, 0);
}


static status_t
put_physical_page_tmap(addr_t virtualAddress, void *handle)
{
	return generic_put_physical_page(virtualAddress);
}

#endif	// 0

status_t
arch_vm_translation_map_create_map(bool kernel, VMTranslationMap** _map)
{
	return gPPCPagingMethod->CreateTranslationMap(kernel, _map);
}

status_t
arch_vm_translation_map_init(kernel_args *args,
	VMPhysicalPageMapper** _physicalPageMapper)
{
	TRACE("vm_translation_map_init: entry\n");

224 TRACE("physical memory ranges:\n");
225 for (uint32 i
= 0; i
< args
->num_physical_memory_ranges
; i
++) {
226 phys_addr_t start
= args
->physical_memory_range
[i
].start
;
227 phys_addr_t end
= start
+ args
->physical_memory_range
[i
].size
;
228 TRACE(" %#10" B_PRIxPHYSADDR
" - %#10" B_PRIxPHYSADDR
"\n", start
,
232 TRACE("allocated physical ranges:\n");
233 for (uint32 i
= 0; i
< args
->num_physical_allocated_ranges
; i
++) {
234 phys_addr_t start
= args
->physical_allocated_range
[i
].start
;
235 phys_addr_t end
= start
+ args
->physical_allocated_range
[i
].size
;
236 TRACE(" %#10" B_PRIxPHYSADDR
" - %#10" B_PRIxPHYSADDR
"\n", start
,
240 TRACE("allocated virtual ranges:\n");
241 for (uint32 i
= 0; i
< args
->num_virtual_allocated_ranges
; i
++) {
242 addr_t start
= args
->virtual_allocated_range
[i
].start
;
243 addr_t end
= start
+ args
->virtual_allocated_range
[i
].size
;
244 TRACE(" %#10" B_PRIxADDR
" - %#10" B_PRIxADDR
"\n", start
, end
);
	if (false /* TODO: Check for AMCC460! */) {
		dprintf("using AMCC 460 paging\n");
		//XXX:gPPCPagingMethod = new(&sPagingMethodBuffer) PPCPagingMethod460;
	} else {
		dprintf("using Classic paging\n");
		gPPCPagingMethod = new(&sPagingMethodBuffer) PPCPagingMethodClassic;
	}

	return gPPCPagingMethod->Init(args, _physicalPageMapper);
}

status_t
arch_vm_translation_map_init_post_area(kernel_args *args)
{
	TRACE("vm_translation_map_init_post_area: entry\n");

	return gPPCPagingMethod->InitPostArea(args);
}

status_t
arch_vm_translation_map_init_post_sem(kernel_args *args)
{
	// init physical page mapper
	return generic_vm_physical_page_mapper_init_post_sem(args);
}

/**	Directly maps a page without having knowledge of any kernel structures.
 *	Used only during VM setup.
 *	It currently ignores the "attributes" parameter and sets all pages
 *	read/write.
 */
status_t
arch_vm_translation_map_early_map(kernel_args *args, addr_t va, phys_addr_t pa,
	uint8 attributes, phys_addr_t (*get_free_page)(kernel_args *))
{
	TRACE("early_tmap: entry pa %#" B_PRIxPHYSADDR " va %#" B_PRIxADDR "\n", pa,
		va);

	return gPPCPagingMethod->MapEarly(args, va, pa, attributes, get_free_page);
}

// XXX currently assumes this translation map is active

status_t
arch_vm_translation_map_early_query(addr_t va, phys_addr_t *out_physical)
{
	//PANIC_UNIMPLEMENTED();
	panic("vm_translation_map_early_query(): not yet implemented\n");
	return B_OK;
}

status_t
ppc_map_address_range(addr_t virtualAddress, phys_addr_t physicalAddress,
	size_t size)
{
	addr_t virtualEnd = ROUNDUP(virtualAddress + size, B_PAGE_SIZE);
	virtualAddress = ROUNDDOWN(virtualAddress, B_PAGE_SIZE);
	physicalAddress = ROUNDDOWN(physicalAddress, B_PAGE_SIZE);

	VMAddressSpace *addressSpace = VMAddressSpace::Kernel();
	PPCVMTranslationMap* map = static_cast<PPCVMTranslationMap*>(
		addressSpace->TranslationMap());

	vm_page_reservation reservation;
	vm_page_reserve_pages(&reservation, 0, VM_PRIORITY_USER);
		// We don't need any pages for mapping.

	// map the pages
	for (; virtualAddress < virtualEnd;
			virtualAddress += B_PAGE_SIZE, physicalAddress += B_PAGE_SIZE) {
		status_t error = map->Map(virtualAddress, physicalAddress,
			B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0, &reservation);
		if (error != B_OK) {
			vm_page_unreserve_pages(&reservation);
			return error;
		}
	}

	vm_page_unreserve_pages(&reservation);

	return B_OK;
}

void
ppc_unmap_address_range(addr_t virtualAddress, size_t size)
{
	addr_t virtualEnd = ROUNDUP(virtualAddress + size, B_PAGE_SIZE);
	virtualAddress = ROUNDDOWN(virtualAddress, B_PAGE_SIZE);

	VMAddressSpace *addressSpace = VMAddressSpace::Kernel();

	PPCVMTranslationMap* map = static_cast<PPCVMTranslationMap*>(
		addressSpace->TranslationMap());
	map->Unmap(virtualAddress, virtualEnd);
}

status_t
ppc_remap_address_range(addr_t *_virtualAddress, size_t size, bool unmap)
{
	VMAddressSpace *addressSpace = VMAddressSpace::Kernel();

	PPCVMTranslationMap* map = static_cast<PPCVMTranslationMap*>(
		addressSpace->TranslationMap());

	return map->RemapAddressRange(_virtualAddress, size, unmap);
}

bool
arch_vm_translation_map_is_kernel_page_accessible(addr_t virtualAddress,
	uint32 protection)
{
	if (!gPPCPagingMethod)
		return true;

	return gPPCPagingMethod->IsKernelPageAccessible(virtualAddress, protection);
}