/*
 * Copyright 2012, Alex Smith, alex@alex-smith.me.uk.
 * Distributed under the terms of the MIT License.
 */
#ifndef KERNEL_ARCH_X86_PAGING_X86_PHYSICAL_PAGE_MAPPER_MAPPED_H
#define KERNEL_ARCH_X86_PAGING_X86_PHYSICAL_PAGE_MAPPER_MAPPED_H


#include <new>

#include <string.h>

#include <OS.h>

#include <cpu.h>
#include <kernel.h>
#include <smp.h>
#include <vm/vm.h>
#include <vm/vm_types.h>
#include <vm/VMAddressSpace.h>

#include "paging/x86_physical_page_mapper.h"
#include "paging/X86PagingStructures.h"
#include "paging/X86VMTranslationMap.h"


struct kernel_args;


/*!	Physical page mapper implementation for use where the whole of physical
	memory is permanently mapped into the kernel address space.

	This is used on x86_64 where the virtual address space is likely a great
	deal larger than the amount of physical memory in the machine, so it can
	all be mapped in permanently, which is faster and makes life much easier.
*/
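
/*	Illustrative sketch, not part of the interface: because the physical map
	area is a single permanent linear mapping, translating a physical address
	that lies below KERNEL_PMAP_SIZE reduces to one addition:

		void* virtualAddress = (void*)(physicalAddress + KERNEL_PMAP_BASE);

	No temporary mapping, locking or TLB invalidation is required for any of
	the operations below.
*/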


// #pragma mark - TranslationMapPhysicalPageMapper


inline void
TranslationMapPhysicalPageMapper::Delete()
{
	delete this;
}


inline void*
TranslationMapPhysicalPageMapper::GetPageTableAt(
	phys_addr_t physicalAddress)
{
	ASSERT(physicalAddress % B_PAGE_SIZE == 0);

	return (void*)(physicalAddress + KERNEL_PMAP_BASE);
}


// #pragma mark - X86PhysicalPageMapper


inline status_t
X86PhysicalPageMapper::CreateTranslationMapPhysicalPageMapper(
	TranslationMapPhysicalPageMapper** _mapper)
{
	auto mapper = new(std::nothrow) TranslationMapPhysicalPageMapper;
	if (mapper == NULL)
		return B_NO_MEMORY;

	*_mapper = mapper;
	return B_OK;
}


inline void*
X86PhysicalPageMapper::InterruptGetPageTableAt(
	phys_addr_t physicalAddress)
{
	ASSERT(physicalAddress % B_PAGE_SIZE == 0);

	return (void*)(physicalAddress + KERNEL_PMAP_BASE);
}
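

/*	The GetPage*()/PutPage*() pairs exist for page mappers that have to set up
	temporary mappings. Here GetPage*() only computes an address inside the
	physical map area (rejecting anything at or beyond KERNEL_PMAP_SIZE), so
	there is nothing for PutPage*() to undo and the handle is never used.
*/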


inline status_t
X86PhysicalPageMapper::GetPage(phys_addr_t physicalAddress,
	addr_t* virtualAddress, void** handle)
{
	if (physicalAddress >= KERNEL_PMAP_SIZE)
		return B_BAD_ADDRESS;

	*virtualAddress = physicalAddress + KERNEL_PMAP_BASE;
	return B_OK;
}


inline status_t
X86PhysicalPageMapper::PutPage(addr_t virtualAddress, void* handle)
{
	return B_OK;
}


inline status_t
X86PhysicalPageMapper::GetPageCurrentCPU(phys_addr_t physicalAddress,
	addr_t* virtualAddress, void** handle)
{
	if (physicalAddress >= KERNEL_PMAP_SIZE)
		return B_BAD_ADDRESS;

	*virtualAddress = physicalAddress + KERNEL_PMAP_BASE;
	return B_OK;
}


inline status_t
X86PhysicalPageMapper::PutPageCurrentCPU(addr_t virtualAddress,
	void* handle)
{
	return B_OK;
}


inline status_t
X86PhysicalPageMapper::GetPageDebug(phys_addr_t physicalAddress,
	addr_t* virtualAddress, void** handle)
{
	if (physicalAddress >= KERNEL_PMAP_SIZE)
		return B_BAD_ADDRESS;

	*virtualAddress = physicalAddress + KERNEL_PMAP_BASE;
	return B_OK;
}


inline status_t
X86PhysicalPageMapper::PutPageDebug(addr_t virtualAddress, void* handle)
{
	return B_OK;
}


inline status_t
X86PhysicalPageMapper::MemsetPhysical(phys_addr_t address, int value,
	phys_size_t length)
{
	if (address >= KERNEL_PMAP_SIZE || address + length > KERNEL_PMAP_SIZE)
		return B_BAD_ADDRESS;

	memset((void*)(address + KERNEL_PMAP_BASE), value, length);
	return B_OK;
}


inline status_t
X86PhysicalPageMapper::MemcpyFromPhysical(void* to, phys_addr_t _from,
	size_t length, bool user)
{
	if (_from >= KERNEL_PMAP_SIZE || _from + length > KERNEL_PMAP_SIZE)
		return B_BAD_ADDRESS;

	auto from = (void*)(_from + KERNEL_PMAP_BASE);

	if (user)
		return user_memcpy(to, from, length);

	memcpy(to, from, length);
	return B_OK;
}


inline status_t
X86PhysicalPageMapper::MemcpyToPhysical(phys_addr_t _to, const void* from,
	size_t length, bool user)
{
	if (_to >= KERNEL_PMAP_SIZE || _to + length > KERNEL_PMAP_SIZE)
		return B_BAD_ADDRESS;

	auto to = (void*)(_to + KERNEL_PMAP_BASE);

	if (user)
		return user_memcpy(to, from, length);

	memcpy(to, from, length);
	return B_OK;
}


inline void
X86PhysicalPageMapper::MemcpyPhysicalPage(phys_addr_t to,
	phys_addr_t from)
{
	memcpy((void*)(to + KERNEL_PMAP_BASE), (void*)(from + KERNEL_PMAP_BASE),
		B_PAGE_SIZE);
}
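

/*	Initializes the mapped physical page mapper from the kernel args and
	returns the mapper instances to use (intended to be called from the
	architecture's paging initialization).
*/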
status_t mapped_physical_page_ops_init(kernel_args* args,
	X86PhysicalPageMapper*& _pageMapper,
	TranslationMapPhysicalPageMapper*& _kernelPageMapper);


#endif	// KERNEL_ARCH_X86_PAGING_X86_PHYSICAL_PAGE_MAPPER_MAPPED_H