/*
 * Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


#include <arch/vm_translation_map.h>

#include <boot/kernel_args.h>
#include <safemode.h>

#ifdef __x86_64__
#	include "paging/64bit/X86PagingMethod64Bit.h"
#else
#	include "paging/32bit/X86PagingMethod32Bit.h"
#	include "paging/pae/X86PagingMethodPAE.h"
#endif


//#define TRACE_VM_TMAP
#ifdef TRACE_VM_TMAP
#	define TRACE(x...) dprintf(x)
#else
#	define TRACE(x...) ;
#endif
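

// Statically allocated storage for the selected paging method object. It is
// constructed with placement new in arch_vm_translation_map_init(), before
// the kernel heap exists; the buffer is sized for whichever method this
// build supports, and the uint64 member enforces suitable alignment.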
static union {
	uint64	align;
#ifdef __x86_64__
	char	sixty_four[sizeof(X86PagingMethod64Bit)];
#else
	char	thirty_two[sizeof(X86PagingMethod32Bit)];
#if B_HAIKU_PHYSICAL_BITS == 64
	char	pae[sizeof(X86PagingMethodPAE)];
#endif
#endif
} sPagingMethodBuffer;


// #pragma mark - VM API


status_t
arch_vm_translation_map_create_map(bool kernel, VMTranslationMap** _map)
{
	return gX86PagingMethod->CreateTranslationMap(kernel, _map);
}
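

// Chooses and initializes the paging method for this system, based on the
// memory layout and CPU features the boot loader reports in kernel_args.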
status_t
arch_vm_translation_map_init(kernel_args *args,
	VMPhysicalPageMapper** _physicalPageMapper)
{
	TRACE("vm_translation_map_init: entry\n");

#ifdef TRACE_VM_TMAP
	TRACE("physical memory ranges:\n");
	for (uint32 i = 0; i < args->num_physical_memory_ranges; i++) {
		phys_addr_t start = args->physical_memory_range[i].start;
		phys_addr_t end = start + args->physical_memory_range[i].size;
		TRACE("  %#10" B_PRIxPHYSADDR " - %#10" B_PRIxPHYSADDR "\n", start,
			end);
	}

	TRACE("allocated physical ranges:\n");
	for (uint32 i = 0; i < args->num_physical_allocated_ranges; i++) {
		phys_addr_t start = args->physical_allocated_range[i].start;
		phys_addr_t end = start + args->physical_allocated_range[i].size;
		TRACE("  %#10" B_PRIxPHYSADDR " - %#10" B_PRIxPHYSADDR "\n", start,
			end);
	}

	TRACE("allocated virtual ranges:\n");
	for (uint32 i = 0; i < args->num_virtual_allocated_ranges; i++) {
		addr_t start = args->virtual_allocated_range[i].start;
		addr_t end = start + args->virtual_allocated_range[i].size;
		TRACE("  %#10" B_PRIxADDR " - %#10" B_PRIxADDR "\n", start, end);
	}
#endif
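
	// Select the paging method: x86-64 builds always use 64 bit paging. On
	// 32 bit builds with a 64 bit phys_addr_t, prefer PAE when the CPU
	// supports it and it is actually needed (for NX support or physical
	// memory beyond 4 GiB), unless the safe mode 4 GB memory limit is set.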
#ifdef __x86_64__
	gX86PagingMethod = new(&sPagingMethodBuffer) X86PagingMethod64Bit;
#elif B_HAIKU_PHYSICAL_BITS == 64
	bool paeAvailable = x86_check_feature(IA32_FEATURE_PAE, FEATURE_COMMON);
	bool paeNeeded = x86_check_feature(IA32_FEATURE_AMD_EXT_NX,
		FEATURE_EXT_AMD);
	if (!paeNeeded) {
		// Even without NX support, PAE is needed to address physical memory
		// beyond 4 GiB.
		for (uint32 i = 0; i < args->num_physical_memory_ranges; i++) {
			phys_addr_t end = args->physical_memory_range[i].start
				+ args->physical_memory_range[i].size;
			if (end > 0x100000000LL) {
				paeNeeded = true;
				break;
			}
		}
	}

	bool paeDisabled = get_safemode_boolean_early(args,
		B_SAFEMODE_4_GB_MEMORY_LIMIT, false);

	if (paeAvailable && paeNeeded && !paeDisabled) {
		dprintf("using PAE paging\n");
		gX86PagingMethod = new(&sPagingMethodBuffer) X86PagingMethodPAE;
	} else {
		dprintf("using 32 bit paging (PAE %s)\n",
			!paeAvailable
				? "not available"
				: (paeDisabled ? "disabled" : "not needed"));
		gX86PagingMethod = new(&sPagingMethodBuffer) X86PagingMethod32Bit;
	}
#else
	gX86PagingMethod = new(&sPagingMethodBuffer) X86PagingMethod32Bit;
#endif

	return gX86PagingMethod->Init(args, _physicalPageMapper);
}


status_t
arch_vm_translation_map_init_post_sem(kernel_args *args)
{
	return B_OK;
}


status_t
arch_vm_translation_map_init_post_area(kernel_args *args)
{
	TRACE("vm_translation_map_init_post_area: entry\n");

	return gX86PagingMethod->InitPostArea(args);
}


status_t
arch_vm_translation_map_early_map(kernel_args *args, addr_t va, phys_addr_t pa,
	uint8 attributes, phys_addr_t (*get_free_page)(kernel_args *))
{
	TRACE("early_tmap: entry pa %#" B_PRIxPHYSADDR " va %#" B_PRIxADDR "\n", pa,
		va);

	return gX86PagingMethod->MapEarly(args, va, pa, attributes, get_free_page);
}


/*!	Verifies that the page at the given virtual address can be accessed in the
	current context.

	This function is invoked in the kernel debugger. Paranoid checking is in
	order.

	\param virtualAddress The virtual address to be checked.
	\param protection The area protection for which to check. Valid is a bitwise
		or of one or more of \c B_KERNEL_READ_AREA or \c B_KERNEL_WRITE_AREA.
	\return \c true, if the address can be accessed in all ways specified by
		\a protection, \c false otherwise.
*/
bool
arch_vm_translation_map_is_kernel_page_accessible(addr_t virtualAddress,
	uint32 protection)
{
	// If no paging method has been installed yet, there is nothing to check
	// against; report the page as accessible.
	if (!gX86PagingMethod)
		return true;

	return gX86PagingMethod->IsKernelPageAccessible(virtualAddress, protection);
}