/*
 * Copyright 2012, Alex Smith, alex@alex-smith.me.uk.
 * Distributed under the terms of the MIT License.
 */
#ifndef KERNEL_ARCH_X86_PAGING_X86_PHYSICAL_PAGE_MAPPER_MAPPED_H
#define KERNEL_ARCH_X86_PAGING_X86_PHYSICAL_PAGE_MAPPER_MAPPED_H


#include <new>

#include <string.h>

#include <boot/kernel_args.h>
#include <kernel.h>
#include <vm/vm_types.h>
#include <vm/VMAddressSpace.h>

#include "paging/x86_physical_page_mapper.h"
#include "paging/X86PagingStructures.h"
#include "paging/X86VMTranslationMap.h"


/*!	Physical page mapper implementation for use where the whole of physical
	memory is permanently mapped into the kernel address space.

	This is used on x86_64, where the virtual address space is likely a great
	deal larger than the amount of physical memory in the machine, so it can
	all be mapped in permanently, which is faster and makes life much easier.
*/
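
/*	Illustrative sketch only: the caller and the names "mapper" and
	"physicalAddress" below are hypothetical and not part of this header.
	It shows the consequence of the permanent mapping: GetPage() reduces to
	adding KERNEL_PMAP_BASE, and PutPage() has nothing to undo.

		addr_t virtualAddress;
		void* handle;
		if (mapper->GetPage(physicalAddress, &virtualAddress, &handle) == B_OK) {
			// virtualAddress == physicalAddress + KERNEL_PMAP_BASE
			memset((void*)virtualAddress, 0, B_PAGE_SIZE);
			mapper->PutPage(virtualAddress, handle);
		}
*/

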
// #pragma mark - TranslationMapPhysicalPageMapper


inline void
TranslationMapPhysicalPageMapper::Delete()
{
	delete this;
}


inline void*
TranslationMapPhysicalPageMapper::GetPageTableAt(phys_addr_t physicalAddress)
{
	ASSERT(physicalAddress % B_PAGE_SIZE == 0);

	return (void*)(physicalAddress + KERNEL_PMAP_BASE);
}


// #pragma mark - X86PhysicalPageMapper


inline status_t
X86PhysicalPageMapper::CreateTranslationMapPhysicalPageMapper(
	TranslationMapPhysicalPageMapper** _mapper)
{
	auto mapper = new(std::nothrow) TranslationMapPhysicalPageMapper;
	if (mapper == NULL)
		return B_NO_MEMORY;

	*_mapper = mapper;
	return B_OK;
}


inline void*
X86PhysicalPageMapper::InterruptGetPageTableAt(phys_addr_t physicalAddress)
{
	ASSERT(physicalAddress % B_PAGE_SIZE == 0);

	return (void*)(physicalAddress + KERNEL_PMAP_BASE);
}


inline status_t
X86PhysicalPageMapper::GetPage(phys_addr_t physicalAddress,
	addr_t* virtualAddress, void** handle)
{
	if (physicalAddress >= KERNEL_PMAP_SIZE)
		return B_BAD_ADDRESS;

	*virtualAddress = physicalAddress + KERNEL_PMAP_BASE;
	return B_OK;
}


inline status_t
X86PhysicalPageMapper::PutPage(addr_t virtualAddress, void* handle)
{
	// Nothing to unmap: physical memory stays permanently mapped.
	return B_OK;
}


inline status_t
X86PhysicalPageMapper::GetPageCurrentCPU(phys_addr_t physicalAddress,
	addr_t* virtualAddress, void** handle)
{
	if (physicalAddress >= KERNEL_PMAP_SIZE)
		return B_BAD_ADDRESS;

	*virtualAddress = physicalAddress + KERNEL_PMAP_BASE;
	return B_OK;
}


inline status_t
X86PhysicalPageMapper::PutPageCurrentCPU(addr_t virtualAddress, void* handle)
{
	return B_OK;
}


inline status_t
X86PhysicalPageMapper::GetPageDebug(phys_addr_t physicalAddress,
	addr_t* virtualAddress, void** handle)
{
	if (physicalAddress >= KERNEL_PMAP_SIZE)
		return B_BAD_ADDRESS;

	*virtualAddress = physicalAddress + KERNEL_PMAP_BASE;
	return B_OK;
}


inline status_t
X86PhysicalPageMapper::PutPageDebug(addr_t virtualAddress, void* handle)
{
	return B_OK;
}


inline status_t
X86PhysicalPageMapper::MemsetPhysical(phys_addr_t address, int value,
	phys_size_t length)
{
	if (address >= KERNEL_PMAP_SIZE || address + length > KERNEL_PMAP_SIZE)
		return B_BAD_ADDRESS;

	memset((void*)(address + KERNEL_PMAP_BASE), value, length);
	return B_OK;
}


inline status_t
X86PhysicalPageMapper::MemcpyFromPhysical(void* to, phys_addr_t _from,
	size_t length, bool user)
{
	if (_from >= KERNEL_PMAP_SIZE || _from + length > KERNEL_PMAP_SIZE)
		return B_BAD_ADDRESS;

	auto from = (void*)(_from + KERNEL_PMAP_BASE);

	if (user)
		return user_memcpy(to, from, length);

	memcpy(to, from, length);
	return B_OK;
}


inline status_t
X86PhysicalPageMapper::MemcpyToPhysical(phys_addr_t _to, const void* from,
	size_t length, bool user)
{
	if (_to >= KERNEL_PMAP_SIZE || _to + length > KERNEL_PMAP_SIZE)
		return B_BAD_ADDRESS;

	auto to = (void*)(_to + KERNEL_PMAP_BASE);

	if (user)
		return user_memcpy(to, from, length);

	memcpy(to, from, length);
	return B_OK;
}


inline void
X86PhysicalPageMapper::MemcpyPhysicalPage(phys_addr_t to, phys_addr_t from)
{
	memcpy((void*)(to + KERNEL_PMAP_BASE), (void*)(from + KERNEL_PMAP_BASE),
		B_PAGE_SIZE);
}


status_t mapped_physical_page_ops_init(kernel_args* args,
	X86PhysicalPageMapper*& _pageMapper,
	TranslationMapPhysicalPageMapper*& _kernelPageMapper);


#endif // KERNEL_ARCH_X86_PAGING_X86_PHYSICAL_PAGE_MAPPER_MAPPED_H