/*
 * Copyright 2014, Paweł Dziepak, pdziepak@quarnos.org.
 * Copyright 2012, Alex Smith, alex@alex-smith.me.uk.
 * Copyright 2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Distributed under the terms of the MIT License.
 */
#ifndef KERNEL_ARCH_X86_PAGING_64BIT_X86_PAGING_METHOD_64BIT_H
#define KERNEL_ARCH_X86_PAGING_64BIT_X86_PAGING_METHOD_64BIT_H


#include <atomic>

#include <KernelExport.h>

#include <vm/vm_types.h>

#include "paging/64bit/paging.h"
#include "paging/X86PagingMethod.h"
#include "paging/X86PagingStructures.h"


class TranslationMapPhysicalPageMapper;
class X86PhysicalPageMapper;
struct vm_page_reservation;
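

// The x86-64 paging method: manages the four-level page table hierarchy
// (PML4 -> PDPT -> page directory -> page table) used by the kernel and by
// all userland address spaces.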
class X86PagingMethod64Bit final : public X86PagingMethod {
public:
								X86PagingMethod64Bit();
	virtual						~X86PagingMethod64Bit();

	virtual	status_t			Init(kernel_args* args,
									VMPhysicalPageMapper** _physicalPageMapper);
	virtual	status_t			InitPostArea(kernel_args* args);

	virtual	status_t			CreateTranslationMap(bool kernel,
									VMTranslationMap** _map);
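
	// Used during early boot, before the VM is fully initialized: physical
	// pages for the mapping are obtained through the get_free_page()
	// callback rather than the normal page allocator.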
	virtual	status_t			MapEarly(kernel_args* args,
									addr_t virtualAddress,
									phys_addr_t physicalAddress,
									uint8 attributes,
									page_num_t (*get_free_page)(kernel_args*));

	virtual	bool				IsKernelPageAccessible(addr_t virtualAddress,
									uint32 protection);

	inline	X86PhysicalPageMapper* PhysicalPageMapper() const
									{ return fPhysicalPageMapper; }
	inline	TranslationMapPhysicalPageMapper* KernelPhysicalPageMapper() const
									{ return fKernelPhysicalPageMapper; }

	inline	uint64*				KernelVirtualPML4() const
									{ return fKernelVirtualPML4; }
	inline	phys_addr_t			KernelPhysicalPML4() const
									{ return fKernelPhysicalPML4; }

	static	X86PagingMethod64Bit* Method();
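
	// Helpers for walking the page table hierarchy that a virtual address
	// resolves through. With allocateTables set, missing intermediate tables
	// are allocated from the given page reservation; mapCount tracks the
	// mappings created along the way.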
	static	uint64*				PageDirectoryForAddress(uint64* virtualPML4,
									addr_t virtualAddress, bool isKernel,
									bool allocateTables,
									vm_page_reservation* reservation,
									TranslationMapPhysicalPageMapper*
										pageMapper, int32& mapCount);
	static	uint64*				PageDirectoryEntryForAddress(
									uint64* virtualPML4, addr_t virtualAddress,
									bool isKernel, bool allocateTables,
									vm_page_reservation* reservation,
									TranslationMapPhysicalPageMapper*
										pageMapper, int32& mapCount);
	static	uint64*				PageTableForAddress(uint64* virtualPML4,
									addr_t virtualAddress, bool isKernel,
									bool allocateTables,
									vm_page_reservation* reservation,
									TranslationMapPhysicalPageMapper*
										pageMapper, int32& mapCount);
	static	uint64*				PageTableEntryForAddress(uint64* virtualPML4,
									addr_t virtualAddress, bool isKernel,
									bool allocateTables,
									vm_page_reservation* reservation,
									TranslationMapPhysicalPageMapper*
										pageMapper, int32& mapCount);
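
	// Manipulation of individual page table entries. Entries are modified
	// through std::atomic so a concurrent hardware page table walk can never
	// observe a torn 64-bit write.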
	static	void				PutPageTableEntryInTable(
									uint64* entry, phys_addr_t physicalAddress,
									uint32 attributes, uint32 memoryType,
									bool globalPage);
	static	void				SetTableEntry(uint64_t* entry,
									uint64_t newEntry);
	static	uint64_t			SetTableEntryFlags(uint64_t* entryPointer,
									uint64_t flags);
	static	uint64				TestAndSetTableEntry(uint64* entry,
									uint64 newEntry, uint64 oldEntry);
	static	uint64_t			ClearTableEntry(uint64_t* entryPointer);
	static	uint64_t			ClearTableEntryFlags(uint64_t* entryPointer,
									uint64_t flags);

	static	uint64				MemoryTypeToPageTableEntryFlags(
									uint32 memoryType);

private:
	static	void				_EnableExecutionDisable(void* dummy, int cpu);

			phys_addr_t			fKernelPhysicalPML4;
			uint64*				fKernelVirtualPML4;

			X86PhysicalPageMapper* fPhysicalPageMapper;
			TranslationMapPhysicalPageMapper* fKernelPhysicalPageMapper;
};
static_assert(sizeof(std::atomic<uint64_t>) == sizeof(uint64_t),
	"Non-trivial representation of atomic uint64_t.");


/*static*/ inline X86PagingMethod64Bit*
X86PagingMethod64Bit::Method()
{
	return static_cast<X86PagingMethod64Bit*>(gX86PagingMethod);
}
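

// A relaxed store suffices here: the store itself is atomic, so the MMU can
// never see a torn entry, and callers are responsible for any TLB
// invalidation the update requires.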
/*static*/ inline void
X86PagingMethod64Bit::SetTableEntry(uint64_t* entryPointer, uint64_t newEntry)
{
	auto& entry = *reinterpret_cast<std::atomic<uint64_t>*>(entryPointer);
	entry.store(newEntry, std::memory_order_relaxed);
}


/*static*/ inline uint64_t
X86PagingMethod64Bit::SetTableEntryFlags(uint64_t* entryPointer, uint64_t flags)
{
	auto& entry = *reinterpret_cast<std::atomic<uint64_t>*>(entryPointer);
	return entry.fetch_or(flags);
}
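

// Compare-and-swap on a table entry: the entry is replaced with newEntry
// only if it currently equals oldEntry. Returns the entry's previous value,
// so the caller can tell whether the swap took place.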
/*static*/ inline uint64
X86PagingMethod64Bit::TestAndSetTableEntry(uint64* entry, uint64 newEntry,
	uint64 oldEntry)
{
	return atomic_test_and_set64((int64*)entry, newEntry, oldEntry);
}
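

// Atomically clears an entry and returns its previous value, so flags set by
// the hardware walker (accessed, dirty) are not lost on unmap.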
/*static*/ inline uint64_t
X86PagingMethod64Bit::ClearTableEntry(uint64_t* entryPointer)
{
	auto& entry = *reinterpret_cast<std::atomic<uint64_t>*>(entryPointer);
	return entry.exchange(0);
}
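

// Atomically clears the given flag bits and returns the entry's previous
// value (used, e.g., when harvesting the accessed/dirty flags).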
/*static*/ inline uint64_t
X86PagingMethod64Bit::ClearTableEntryFlags(uint64_t* entryPointer,
	uint64_t flags)
{
	auto& entry = *reinterpret_cast<std::atomic<uint64_t>*>(entryPointer);
	return entry.fetch_and(~flags);
}


/*static*/ inline uint64
X86PagingMethod64Bit::MemoryTypeToPageTableEntryFlags(uint32 memoryType)
{
	// ATM we only handle the uncacheable and write-through type explicitly. For
	// all other types we rely on the MTRRs to be set up correctly. Since we set
	// the default memory type to write-back and since the uncacheable type in
	// the PTE overrides any MTRR attribute (though, as per the specs, that is
	// not recommended for performance reasons), this reduces the work we
	// actually *have* to do with the MTRRs to setting the remaining types
	// (usually only write-combining for the frame buffer).
	switch (memoryType) {
		case B_UNCACHED_MEMORY:
			return X86_64_PTE_CACHING_DISABLED | X86_64_PTE_WRITE_THROUGH;

		case B_WRITE_COMBINING_MEMORY:
			// X86_PTE_WRITE_THROUGH would be closer, but the combination with
			// MTRR WC is "implementation defined" for Pentium Pro/II.
			return 0;

		case B_WRITE_THROUGH_MEMORY:
			return X86_64_PTE_WRITE_THROUGH;

		case B_WRITE_PROTECTED_MEMORY:
		case B_WRITE_BACK_MEMORY:
		default:
			return 0;
	}
}


#endif	// KERNEL_ARCH_X86_PAGING_64BIT_X86_PAGING_METHOD_64BIT_H