[haiku.git] src/system/kernel/arch/generic/generic_vm_physical_page_mapper.cpp

/*
 * Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


#include "generic_vm_physical_page_mapper.h"

#include <vm/vm_page.h>
#include <vm/vm_priv.h>
#include <vm/VMAddressSpace.h>
#include <thread.h>
#include <util/queue.h>

#include <string.h>
#include <stdlib.h>

//#define TRACE_VM_PHYSICAL_PAGE_MAPPER
#ifdef TRACE_VM_PHYSICAL_PAGE_MAPPER
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif

#define DEBUG_IO_SPACE

// data and structures used to represent physical pages mapped into iospace
typedef struct paddr_chunk_descriptor {
	struct paddr_chunk_descriptor *next_q;
		// must remain first in structure, queue code uses it
	int32	ref_count;
	addr_t	va;
#ifdef DEBUG_IO_SPACE
	thread_id last_ref;
#endif
} paddr_chunk_desc;

static paddr_chunk_desc *paddr_desc;         // will be one per physical chunk
static paddr_chunk_desc **virtual_pmappings; // will be one ptr per virtual chunk in iospace
static int first_free_vmapping;
static int num_virtual_chunks;
static queue mapped_paddr_lru;
static mutex sMutex = MUTEX_INITIALIZER("iospace_mutex");
static sem_id sChunkAvailableSem;
static int32 sChunkAvailableWaitingCounter;

static generic_map_iospace_chunk_func sMapIOSpaceChunk;
static addr_t sIOSpaceBase;
static size_t sIOSpaceSize;
static size_t sIOSpaceChunkSize;
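

// In short: the I/O space is divided into chunks of sIOSpaceChunkSize bytes.
// generic_get_physical_page() maps the chunk containing the requested
// physical address into a free virtual chunk (or reuses an existing mapping),
// and generic_put_physical_page() drops the reference again. Chunks whose
// ref_count has dropped to zero stay mapped and are kept on an LRU queue so
// that they can be recycled once the I/O space is full; callers passing
// PHYSICAL_PAGE_DONT_WAIT get B_NO_MEMORY in that case, everyone else blocks
// on sChunkAvailableSem until a chunk is released.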
status_t
generic_get_physical_page(phys_addr_t pa, addr_t *va, uint32 flags)
{
	int index;
	paddr_chunk_desc *replaced_pchunk;

restart:
	mutex_lock(&sMutex);

	// see if the page is already mapped
	index = pa / sIOSpaceChunkSize;
	if (paddr_desc[index].va != 0) {
		if (paddr_desc[index].ref_count++ == 0) {
			// pull this descriptor out of the lru list
			queue_remove_item(&mapped_paddr_lru, &paddr_desc[index]);
		}

		*va = paddr_desc[index].va + pa % sIOSpaceChunkSize;
		mutex_unlock(&sMutex);
		return B_OK;
	}

	// map it
	if (first_free_vmapping < num_virtual_chunks) {
		// there's a free hole
		paddr_desc[index].va = first_free_vmapping * sIOSpaceChunkSize
			+ sIOSpaceBase;
		*va = paddr_desc[index].va + pa % sIOSpaceChunkSize;
		virtual_pmappings[first_free_vmapping] = &paddr_desc[index];
		paddr_desc[index].ref_count++;

		// push up the first_free_vmapping pointer
		for (; first_free_vmapping < num_virtual_chunks;
				first_free_vmapping++) {
			if (virtual_pmappings[first_free_vmapping] == NULL)
				break;
		}

		sMapIOSpaceChunk(paddr_desc[index].va, index * sIOSpaceChunkSize,
			flags);
		mutex_unlock(&sMutex);

		return B_OK;
	}

	// replace an earlier mapping
	if (queue_peek(&mapped_paddr_lru) == NULL) {
		// no free slots available
		if ((flags & PHYSICAL_PAGE_DONT_WAIT) != 0) {
			// put back to the caller and let them handle this
			mutex_unlock(&sMutex);
			return B_NO_MEMORY;
		} else {
			sChunkAvailableWaitingCounter++;

			mutex_unlock(&sMutex);
			acquire_sem(sChunkAvailableSem);
			goto restart;
		}
	}
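
	// steal the virtual mapping of the least recently used (and no longer
	// referenced) chunk and reuse it for the chunk we need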
	replaced_pchunk = (paddr_chunk_desc*)queue_dequeue(&mapped_paddr_lru);
	paddr_desc[index].va = replaced_pchunk->va;
	replaced_pchunk->va = 0;
	*va = paddr_desc[index].va + pa % sIOSpaceChunkSize;
	paddr_desc[index].ref_count++;
#ifdef DEBUG_IO_SPACE
	paddr_desc[index].last_ref = thread_get_current_thread_id();
#endif
	virtual_pmappings[(*va - sIOSpaceBase) / sIOSpaceChunkSize]
		= paddr_desc + index;

	sMapIOSpaceChunk(paddr_desc[index].va, index * sIOSpaceChunkSize, flags);

	mutex_unlock(&sMutex);
	return B_OK;
}
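

// Releases a reference obtained via generic_get_physical_page(). When the
// last reference to a chunk goes away, the chunk is put on the LRU queue and
// one thread waiting for a free chunk (if any) is woken up.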
status_t
generic_put_physical_page(addr_t va)
{
	paddr_chunk_desc *desc;

	if (va < sIOSpaceBase || va >= sIOSpaceBase + sIOSpaceSize)
		panic("someone called put_physical_page on an invalid va 0x%lx\n", va);
	va -= sIOSpaceBase;

	mutex_lock(&sMutex);

	desc = virtual_pmappings[va / sIOSpaceChunkSize];
	if (desc == NULL) {
		mutex_unlock(&sMutex);
		panic("put_physical_page called on page at va 0x%lx which is not checked out\n",
			va);
		return B_ERROR;
	}

	if (--desc->ref_count == 0) {
		// put it on the mapped lru list
		queue_enqueue(&mapped_paddr_lru, desc);

		if (sChunkAvailableWaitingCounter > 0) {
			sChunkAvailableWaitingCounter--;
			release_sem_etc(sChunkAvailableSem, 1, B_DO_NOT_RESCHEDULE);
		}
	}

	mutex_unlock(&sMutex);

	return B_OK;
}
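

// KDL command: "iospace <physical|virtual|queue>" dumps the physical chunk
// descriptors, the virtual chunk mappings, and/or the LRU queue, depending
// on which of 'p', 'v', and 'q' occur in the argument.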
#ifdef DEBUG_IO_SPACE
static int
dump_iospace(int argc, char** argv)
{
	if (argc < 2) {
		kprintf("usage: iospace <physical|virtual|queue>\n");
		return 0;
	}

	int32 i;

	if (strchr(argv[1], 'p')) {
		// physical address descriptors
		kprintf("I/O space physical descriptors (%p)\n", paddr_desc);

		int32 max = vm_page_num_pages() / (sIOSpaceChunkSize / B_PAGE_SIZE);
		if (argc == 3)
			max = strtol(argv[2], NULL, 0);

		for (i = 0; i < max; i++) {
			kprintf("[%03lx %p %3ld %3ld] ", i, (void *)paddr_desc[i].va,
				paddr_desc[i].ref_count, paddr_desc[i].last_ref);
			if (i % 4 == 3)
				kprintf("\n");
		}
		if (i % 4)
			kprintf("\n");
	}

	if (strchr(argv[1], 'v')) {
		// virtual mappings
		kprintf("I/O space virtual chunk mappings (%p, first free: %d)\n",
			virtual_pmappings, first_free_vmapping);

		for (i = 0; i < num_virtual_chunks; i++) {
			kprintf("[%2ld. %03lx] ", i, virtual_pmappings[i] - paddr_desc);
			if (i % 8 == 7)
				kprintf("\n");
		}
		if (i % 8)
			kprintf("\n");
	}

	if (strchr(argv[1], 'q')) {
		// LRU queue of mapped but currently unreferenced chunks
		kprintf("I/O space mapped queue:\n");

		paddr_chunk_descriptor* descriptor
			= (paddr_chunk_descriptor *)queue_peek(&mapped_paddr_lru);
		i = 0;

		while (descriptor != NULL) {
			kprintf("[%03lx %p] ",
				descriptor - paddr_desc, descriptor);
			if (i++ % 8 == 7)
				kprintf("\n");

			descriptor = descriptor->next_q;
		}
		if (i % 8)
			kprintf("\n");
	}

	return 0;
}
#endif


//	#pragma mark -
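

// The mapper is brought up in three stages, following kernel bootstrap:
// generic_vm_physical_page_mapper_init() runs before areas and semaphores
// exist and can only use vm_allocate_early();
// generic_vm_physical_page_mapper_init_post_area() wraps those early
// allocations in proper areas and creates the "iospace" null area;
// generic_vm_physical_page_mapper_init_post_sem() finally creates
// sChunkAvailableSem, which cannot be done any earlier.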
status_t
generic_vm_physical_page_mapper_init(kernel_args *args,
	generic_map_iospace_chunk_func mapIOSpaceChunk, addr_t *ioSpaceBase,
	size_t ioSpaceSize, size_t ioSpaceChunkSize)
{
	TRACE(("generic_vm_physical_page_mapper_init: entry\n"));

	sMapIOSpaceChunk = mapIOSpaceChunk;
	sIOSpaceSize = ioSpaceSize;
	sIOSpaceChunkSize = ioSpaceChunkSize;

	// reserve virtual space for the IO space
	sIOSpaceBase = vm_allocate_early(args, sIOSpaceSize, 0, 0,
		ioSpaceChunkSize);
	if (sIOSpaceBase == 0) {
		panic("generic_vm_physical_page_mapper_init(): Failed to reserve IO "
			"space in virtual address space!");
		return B_ERROR;
	}

	*ioSpaceBase = sIOSpaceBase;

	// allocate some space to hold physical page mapping info
	paddr_desc = (paddr_chunk_desc *)vm_allocate_early(args,
		sizeof(paddr_chunk_desc) * 1024, ~0L,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0);
	num_virtual_chunks = sIOSpaceSize / sIOSpaceChunkSize;
	virtual_pmappings = (paddr_chunk_desc **)vm_allocate_early(args,
		sizeof(paddr_chunk_desc *) * num_virtual_chunks, ~0L,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0);

	TRACE(("paddr_desc %p, virtual_pmappings %p"/*", iospace_pgtables %p"*/"\n",
		paddr_desc, virtual_pmappings/*, iospace_pgtables*/));

	// initialize our data structures
	memset(paddr_desc, 0, sizeof(paddr_chunk_desc) * 1024);
	memset(virtual_pmappings, 0, sizeof(paddr_chunk_desc *) * num_virtual_chunks);
	first_free_vmapping = 0;
	queue_init(&mapped_paddr_lru);
	sChunkAvailableSem = -1;

	TRACE(("generic_vm_physical_page_mapper_init: done\n"));

	return B_OK;
}


status_t
generic_vm_physical_page_mapper_init_post_area(kernel_args *args)
{
	void *temp;

	TRACE(("generic_vm_physical_page_mapper_init_post_area: entry\n"));

	temp = (void *)paddr_desc;
	create_area("physical_page_mapping_descriptors", &temp, B_EXACT_ADDRESS,
		ROUNDUP(sizeof(paddr_chunk_desc) * 1024, B_PAGE_SIZE),
		B_ALREADY_WIRED, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);

	temp = (void *)virtual_pmappings;
	create_area("iospace_virtual_chunk_descriptors", &temp, B_EXACT_ADDRESS,
		ROUNDUP(sizeof(paddr_chunk_desc *) * num_virtual_chunks, B_PAGE_SIZE),
		B_ALREADY_WIRED, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);

	TRACE(("generic_vm_physical_page_mapper_init_post_area: creating iospace\n"));
	temp = (void *)sIOSpaceBase;
	area_id ioSpaceArea = vm_create_null_area(VMAddressSpace::KernelID(),
		"iospace", &temp, B_EXACT_ADDRESS, sIOSpaceSize,
		CREATE_AREA_PRIORITY_VIP);
	if (ioSpaceArea < 0) {
		panic("generic_vm_physical_page_mapper_init_post_area(): Failed to "
			"create null area for IO space!\n");
		return ioSpaceArea;
	}

	TRACE(("generic_vm_physical_page_mapper_init_post_area: done\n"));

#ifdef DEBUG_IO_SPACE
	add_debugger_command("iospace", &dump_iospace, "Shows info about the I/O space area.");
#endif

	return B_OK;
}
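

// Creates the semaphore that generic_get_physical_page() blocks on when all
// virtual chunks are in use; semaphores are not yet available in the earlier
// init stages, which is why sChunkAvailableSem starts out as -1.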
status_t
generic_vm_physical_page_mapper_init_post_sem(kernel_args *args)
{
	sChunkAvailableSem = create_sem(1, "iospace chunk available");

	return sChunkAvailableSem >= B_OK ? B_OK : sChunkAvailableSem;
}