/*
 * Copyright 2002-2009, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */
8 #ifndef _KERNEL_VM_VM_H
9 #define _KERNEL_VM_VM_H
15 #include <vm/vm_types.h>
18 struct generic_io_vec
;
21 struct system_memory_info
;
22 struct VMAddressSpace
;
27 struct VMPageWiringInfo
;
// area creation flags
#define CREATE_AREA_DONT_WAIT			0x01
#define CREATE_AREA_UNMAP_ADDRESS_RANGE	0x02
#define CREATE_AREA_DONT_CLEAR			0x04
#define CREATE_AREA_PRIORITY_VIP		0x08
#define CREATE_AREA_DONT_COMMIT_MEMORY	0x10

// memory/page allocation priorities
#define VM_PRIORITY_USER	0
#define VM_PRIORITY_SYSTEM	1
#define VM_PRIORITY_VIP		2

// page reserves per priority level
#define VM_PAGE_RESERVE_USER	512
#define VM_PAGE_RESERVE_SYSTEM	128

// memory reserves per priority level, derived from the page reserves
#define VM_MEMORY_RESERVE_USER		(VM_PAGE_RESERVE_USER * B_PAGE_SIZE)
#define VM_MEMORY_RESERVE_SYSTEM	(VM_PAGE_RESERVE_SYSTEM * B_PAGE_SIZE)


// object cache for the vm_page_mapping structures (defined in vm.cpp)
extern struct ObjectCache* gPageMappingsObjectCache;
59 status_t
vm_init(struct kernel_args
*args
);
60 status_t
vm_init_post_sem(struct kernel_args
*args
);
61 status_t
vm_init_post_thread(struct kernel_args
*args
);
62 status_t
vm_init_post_modules(struct kernel_args
*args
);
63 void vm_free_kernel_args(struct kernel_args
*args
);
64 void vm_free_unused_boot_loader_range(addr_t start
, addr_t end
);
65 page_num_t
vm_allocate_early_physical_page(kernel_args
*args
);
66 addr_t
vm_allocate_early(struct kernel_args
*args
, size_t virtualSize
,
67 size_t physicalSize
, uint32 attributes
, addr_t alignment
);
69 void slab_init(struct kernel_args
*args
);
70 void slab_init_post_area();
71 void slab_init_post_sem();
72 void slab_init_post_thread();
74 // to protect code regions with interrupts turned on
75 void permit_page_faults(void);
76 void forbid_page_faults(void);
78 // private kernel only extension (should be moved somewhere else):
79 area_id
create_area_etc(team_id team
, const char *name
, uint32 size
,
80 uint32 lock
, uint32 protection
, uint32 flags
, uint32 guardSize
,
81 const virtual_address_restrictions
* virtualAddressRestrictions
,
82 const physical_address_restrictions
* physicalAddressRestrictions
,
84 area_id
transfer_area(area_id id
, void** _address
, uint32 addressSpec
,
85 team_id target
, bool kernel
);
87 const char* vm_cache_type_to_string(int32 type
);
89 status_t
vm_prepare_kernel_area_debug_protection(area_id id
, void** cookie
);
90 status_t
vm_set_kernel_area_debug_protection(void* cookie
, void* _address
,
91 size_t size
, uint32 protection
);
93 status_t
vm_block_address_range(const char* name
, void* address
, addr_t size
);
94 status_t
vm_unreserve_address_range(team_id team
, void *address
, addr_t size
);
95 status_t
vm_reserve_address_range(team_id team
, void **_address
,
96 uint32 addressSpec
, addr_t size
, uint32 flags
);
97 area_id
vm_create_anonymous_area(team_id team
, const char* name
, addr_t size
,
98 uint32 wiring
, uint32 protection
, uint32 flags
, addr_t guardSize
,
99 const virtual_address_restrictions
* virtualAddressRestrictions
,
100 const physical_address_restrictions
* physicalAddressRestrictions
,
101 bool kernel
, void** _address
);
102 area_id
vm_map_physical_memory(team_id team
, const char *name
, void **address
,
103 uint32 addressSpec
, addr_t size
, uint32 protection
,
104 phys_addr_t physicalAddress
, bool alreadyWired
);
105 area_id
vm_map_physical_memory_vecs(team_id team
, const char* name
,
106 void** _address
, uint32 addressSpec
, addr_t
* _size
, uint32 protection
,
107 struct generic_io_vec
* vecs
, uint32 vecCount
);
108 area_id
vm_map_file(team_id aid
, const char *name
, void **address
,
109 uint32 addressSpec
, addr_t size
, uint32 protection
, uint32 mapping
,
110 bool unmapAddressRange
, int fd
, off_t offset
);
111 struct VMCache
*vm_area_get_locked_cache(struct VMArea
*area
);
112 void vm_area_put_locked_cache(struct VMCache
*cache
);
113 area_id
vm_create_null_area(team_id team
, const char *name
, void **address
,
114 uint32 addressSpec
, addr_t size
, uint32 flags
);
115 area_id
vm_copy_area(team_id team
, const char *name
, void **_address
,
116 uint32 addressSpec
, uint32 protection
, area_id sourceID
);
117 area_id
vm_clone_area(team_id team
, const char *name
, void **address
,
118 uint32 addressSpec
, uint32 protection
, uint32 mapping
,
119 area_id sourceArea
, bool kernel
);
120 status_t
vm_delete_area(team_id teamID
, area_id areaID
, bool kernel
);
121 status_t
vm_create_vnode_cache(struct vnode
*vnode
, struct VMCache
**_cache
);
122 status_t
vm_set_area_memory_type(area_id id
, phys_addr_t physicalBase
,
124 status_t
vm_set_area_protection(team_id team
, area_id areaID
,
125 uint32 newProtection
, bool kernel
);
126 status_t
vm_get_page_mapping(team_id team
, addr_t vaddr
, phys_addr_t
*paddr
);
127 bool vm_test_map_modification(struct vm_page
*page
);
128 void vm_clear_map_flags(struct vm_page
*page
, uint32 flags
);
129 void vm_remove_all_page_mappings(struct vm_page
*page
);
130 int32
vm_clear_page_mapping_accessed_flags(struct vm_page
*page
);
131 int32
vm_remove_all_page_mappings_if_unaccessed(struct vm_page
*page
);
132 status_t
vm_wire_page(team_id team
, addr_t address
, bool writable
,
133 struct VMPageWiringInfo
* info
);
134 void vm_unwire_page(struct VMPageWiringInfo
* info
);
136 status_t
vm_get_physical_page(phys_addr_t paddr
, addr_t
* vaddr
, void** _handle
);
137 status_t
vm_put_physical_page(addr_t vaddr
, void* handle
);
138 status_t
vm_get_physical_page_current_cpu(phys_addr_t paddr
, addr_t
* vaddr
,
140 status_t
vm_put_physical_page_current_cpu(addr_t vaddr
, void* handle
);
141 status_t
vm_get_physical_page_debug(phys_addr_t paddr
, addr_t
* vaddr
,
143 status_t
vm_put_physical_page_debug(addr_t vaddr
, void* handle
);
145 void vm_get_info(system_info
*info
);
146 uint32
vm_num_page_faults(void);
147 off_t
vm_available_memory(void);
148 off_t
vm_available_not_needed_memory(void);
149 off_t
vm_available_not_needed_memory_debug(void);
150 size_t vm_kernel_address_space_left(void);
152 status_t
vm_memset_physical(phys_addr_t address
, int value
, phys_size_t length
);
153 status_t
vm_memcpy_from_physical(void* to
, phys_addr_t from
, size_t length
,
155 status_t
vm_memcpy_to_physical(phys_addr_t to
, const void* from
, size_t length
,
157 void vm_memcpy_physical_page(phys_addr_t to
, phys_addr_t from
);
159 status_t
vm_debug_copy_page_memory(team_id teamID
, void* unsafeMemory
,
160 void* buffer
, size_t size
, bool copyToUnsafe
);
163 area_id
_user_create_area(const char *name
, void **address
, uint32 addressSpec
,
164 size_t size
, uint32 lock
, uint32 protection
);
165 status_t
_user_delete_area(area_id area
);
167 area_id
_user_map_file(const char *uname
, void **uaddress
, uint32 addressSpec
,
168 size_t size
, uint32 protection
, uint32 mapping
,
169 bool unmapAddressRange
, int fd
, off_t offset
);
170 status_t
_user_unmap_memory(void *address
, size_t size
);
171 status_t
_user_set_memory_protection(void* address
, size_t size
,
173 status_t
_user_sync_memory(void *address
, size_t size
, uint32 flags
);
174 status_t
_user_memory_advice(void* address
, size_t size
, uint32 advice
);
175 status_t
_user_get_memory_properties(team_id teamID
, const void *address
,
176 uint32
*_protected
, uint32
*_lock
);
178 area_id
_user_area_for(void *address
);
179 area_id
_user_find_area(const char *name
);
180 status_t
_user_get_area_info(area_id area
, area_info
*info
);
181 status_t
_user_get_next_area_info(team_id team
, ssize_t
*cookie
, area_info
*info
);
182 status_t
_user_resize_area(area_id area
, size_t newSize
);
183 area_id
_user_transfer_area(area_id area
, void **_address
, uint32 addressSpec
,
185 status_t
_user_set_area_protection(area_id area
, uint32 newProtection
);
186 area_id
_user_clone_area(const char *name
, void **_address
, uint32 addressSpec
,
187 uint32 protection
, area_id sourceArea
);
188 status_t
_user_reserve_address_range(addr_t
* userAddress
, uint32 addressSpec
,
190 status_t
_user_unreserve_address_range(addr_t address
, addr_t size
);
196 #endif /* _KERNEL_VM_VM_H */