/*
 * Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */
10 #include "generic_vm_physical_page_mapper.h"
12 #include <vm/vm_page.h>
13 #include <vm/vm_priv.h>
14 #include <vm/VMAddressSpace.h>
16 #include <util/queue.h>

//#define TRACE_VM_PHYSICAL_PAGE_MAPPER
#ifdef TRACE_VM_PHYSICAL_PAGE_MAPPER
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif

#define DEBUG_IO_SPACE
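
// The "iospace" is a contiguous range of kernel virtual address space that
// is divided into chunks of sIOSpaceChunkSize bytes. Physical memory is
// mapped into those chunks on demand; chunks whose reference count drops to
// zero stay mapped but are parked on an LRU queue, so they can be recycled
// once no free slots remain.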

// data and structures used to represent physical pages mapped into iospace
typedef struct paddr_chunk_descriptor {
	struct paddr_chunk_descriptor *next_q;
		// must remain first in structure, queue code uses it
	int32	ref_count;
	addr_t	va;
#ifdef DEBUG_IO_SPACE
	thread_id last_ref;
#endif
} paddr_chunk_desc;

static paddr_chunk_desc *paddr_desc;	// will be one per physical chunk
static paddr_chunk_desc **virtual_pmappings;
	// will be one ptr per virtual chunk in iospace
static int first_free_vmapping;
static int num_virtual_chunks;
static queue mapped_paddr_lru;
static mutex sMutex = MUTEX_INITIALIZER("iospace_mutex");
static sem_id sChunkAvailableSem;
static int32 sChunkAvailableWaitingCounter;

static generic_map_iospace_chunk_func sMapIOSpaceChunk;
static addr_t sIOSpaceBase;
static size_t sIOSpaceSize;
static size_t sIOSpaceChunkSize;
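
// sMutex protects all of the structures above. A thread that finds neither a
// free virtual slot nor a recyclable chunk increments
// sChunkAvailableWaitingCounter and sleeps on sChunkAvailableSem;
// generic_put_physical_page() releases the semaphore once per chunk that
// becomes available again.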


status_t
generic_get_physical_page(phys_addr_t pa, addr_t *va, uint32 flags)
{
	int index;
	paddr_chunk_desc *replaced_pchunk;

restart:
	mutex_lock(&sMutex);

	// see if the page is already mapped
	index = pa / sIOSpaceChunkSize;
	if (paddr_desc[index].va != 0) {
		if (paddr_desc[index].ref_count++ == 0) {
			// pull this descriptor out of the lru list
			queue_remove_item(&mapped_paddr_lru, &paddr_desc[index]);
		}
		*va = paddr_desc[index].va + pa % sIOSpaceChunkSize;
		mutex_unlock(&sMutex);
		return B_OK;
	}

	// map it
	if (first_free_vmapping < num_virtual_chunks) {
		// there's a free hole
		paddr_desc[index].va = first_free_vmapping * sIOSpaceChunkSize
			+ sIOSpaceBase;
		*va = paddr_desc[index].va + pa % sIOSpaceChunkSize;
		virtual_pmappings[first_free_vmapping] = &paddr_desc[index];
		paddr_desc[index].ref_count++;

		// push up the first_free_vmapping pointer
		for (; first_free_vmapping < num_virtual_chunks;
				first_free_vmapping++) {
			if (virtual_pmappings[first_free_vmapping] == NULL)
				break;
		}

		sMapIOSpaceChunk(paddr_desc[index].va, index * sIOSpaceChunkSize,
			flags);
		mutex_unlock(&sMutex);
		return B_OK;
	}

	// replace an earlier mapping
	if (queue_peek(&mapped_paddr_lru) == NULL) {
		// no free slots available
		if ((flags & PHYSICAL_PAGE_DONT_WAIT) != 0) {
			// put back to the caller and let them handle this
			mutex_unlock(&sMutex);
			return B_NO_MEMORY;
		}

		// wait until a chunk is freed, then try again
		sChunkAvailableWaitingCounter++;

		mutex_unlock(&sMutex);
		acquire_sem(sChunkAvailableSem);
		goto restart;
	}
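
	// Recycle the least recently used chunk: detach it from its previous
	// physical range by clearing the old descriptor's va, then hand its
	// virtual slot to the chunk containing pa.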
	replaced_pchunk = (paddr_chunk_desc *)queue_dequeue(&mapped_paddr_lru);
	paddr_desc[index].va = replaced_pchunk->va;
	replaced_pchunk->va = 0;
	*va = paddr_desc[index].va + pa % sIOSpaceChunkSize;
	paddr_desc[index].ref_count++;
#ifdef DEBUG_IO_SPACE
	paddr_desc[index].last_ref = thread_get_current_thread_id();
#endif
	virtual_pmappings[(*va - sIOSpaceBase) / sIOSpaceChunkSize]
		= paddr_desc + index;

	sMapIOSpaceChunk(paddr_desc[index].va, index * sIOSpaceChunkSize, flags);

	mutex_unlock(&sMutex);
	return B_OK;
}
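
/* A minimal usage sketch (not part of this file): copying one page of
 * physical memory through the iospace window. `physicalAddress` and
 * `buffer` are hypothetical caller-supplied values.
 *
 *	addr_t virtualAddress;
 *	if (generic_get_physical_page(physicalAddress, &virtualAddress, 0)
 *			== B_OK) {
 *		memcpy(buffer, (const void *)virtualAddress, B_PAGE_SIZE);
 *		generic_put_physical_page(virtualAddress);
 *	}
 */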


status_t
generic_put_physical_page(addr_t va)
{
	paddr_chunk_desc *desc;

	if (va < sIOSpaceBase || va >= sIOSpaceBase + sIOSpaceSize)
		panic("someone called put_physical_page on an invalid va 0x%lx\n", va);
	va -= sIOSpaceBase;

	mutex_lock(&sMutex);

	desc = virtual_pmappings[va / sIOSpaceChunkSize];
	if (desc == NULL) {
		mutex_unlock(&sMutex);
		panic("put_physical_page called on page at va 0x%lx which is not "
			"checked out\n", va);
		return B_ERROR;
	}
	if (--desc->ref_count == 0) {
		// put it on the mapped lru list
		queue_enqueue(&mapped_paddr_lru, desc);

		if (sChunkAvailableWaitingCounter > 0) {
			sChunkAvailableWaitingCounter--;
			release_sem_etc(sChunkAvailableSem, 1, B_DO_NOT_RESCHEDULE);
		}
	}

	mutex_unlock(&sMutex);
	return B_OK;
}


#ifdef DEBUG_IO_SPACE

static int
dump_iospace(int argc, char** argv)
{
	if (argc < 2) {
		kprintf("usage: iospace <physical|virtual|queue>\n");
		return 0;
	}

	int32 i;

	if (strchr(argv[1], 'p')) {
		// physical address descriptors
		kprintf("I/O space physical descriptors (%p)\n", paddr_desc);

		int32 max = vm_page_num_pages() / (sIOSpaceChunkSize / B_PAGE_SIZE);
		if (argc == 3)
			max = strtol(argv[2], NULL, 0);

		for (i = 0; i < max; i++) {
			kprintf("[%03lx %p %3ld %3ld] ", i, (void *)paddr_desc[i].va,
				paddr_desc[i].ref_count, paddr_desc[i].last_ref);
			if (i % 8 == 7)
				kprintf("\n");
		}
		if (i % 8)
			kprintf("\n");
	}

	if (strchr(argv[1], 'v')) {
		// virtual mappings
		kprintf("I/O space virtual chunk mappings (%p, first free: %d)\n",
			virtual_pmappings, first_free_vmapping);

		for (i = 0; i < num_virtual_chunks; i++) {
			kprintf("[%2ld. %03lx] ", i, virtual_pmappings[i] - paddr_desc);
			if (i % 8 == 7)
				kprintf("\n");
		}
		if (i % 8)
			kprintf("\n");
	}

	if (strchr(argv[1], 'q')) {
		// iospace queue
		kprintf("I/O space mapped queue:\n");

		paddr_chunk_descriptor* descriptor
			= (paddr_chunk_descriptor *)queue_peek(&mapped_paddr_lru);
		i = 0;

		while (descriptor != NULL) {
			kprintf("[%03lx %p] ", (long)(descriptor - paddr_desc),
				descriptor);
			if (i++ % 8 == 7)
				kprintf("\n");

			descriptor = descriptor->next_q;
		}
		if (i % 8)
			kprintf("\n");
	}

	return 0;
}

#endif
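
// The mapper comes up in the usual kernel boot stages: the plain init runs
// before areas and semaphores exist (hence vm_allocate_early() and
// sChunkAvailableSem = -1), init_post_area() re-registers the early
// allocations as proper areas, and init_post_sem() finally creates the
// semaphore.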


status_t
generic_vm_physical_page_mapper_init(kernel_args *args,
	generic_map_iospace_chunk_func mapIOSpaceChunk, addr_t *ioSpaceBase,
	size_t ioSpaceSize, size_t ioSpaceChunkSize)
{
	TRACE(("generic_vm_physical_page_mapper_init: entry\n"));

	sMapIOSpaceChunk = mapIOSpaceChunk;
	sIOSpaceSize = ioSpaceSize;
	sIOSpaceChunkSize = ioSpaceChunkSize;

	// reserve virtual space for the IO space
	sIOSpaceBase = vm_allocate_early(args, sIOSpaceSize, 0, 0,
		ioSpaceChunkSize);
	if (sIOSpaceBase == 0) {
		panic("generic_vm_physical_page_mapper_init(): Failed to reserve IO "
			"space in virtual address space!");
		return B_ERROR;
	}

	*ioSpaceBase = sIOSpaceBase;

	// allocate some space to hold physical page mapping info
	paddr_desc = (paddr_chunk_desc *)vm_allocate_early(args,
		sizeof(paddr_chunk_desc) * 1024, ~0L,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0);
	num_virtual_chunks = sIOSpaceSize / sIOSpaceChunkSize;
	virtual_pmappings = (paddr_chunk_desc **)vm_allocate_early(args,
		sizeof(paddr_chunk_desc *) * num_virtual_chunks, ~0L,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0);

	TRACE(("paddr_desc %p, virtual_pmappings %p"/*", iospace_pgtables %p"*/"\n",
		paddr_desc, virtual_pmappings/*, iospace_pgtables*/));

	// initialize our data structures
	memset(paddr_desc, 0, sizeof(paddr_chunk_desc) * 1024);
	memset(virtual_pmappings, 0,
		sizeof(paddr_chunk_desc *) * num_virtual_chunks);
	first_free_vmapping = 0;
	queue_init(&mapped_paddr_lru);
	sChunkAvailableSem = -1;
		// the semaphore is created later, in init_post_sem()

	TRACE(("generic_vm_physical_page_mapper_init: done\n"));

	return B_OK;
}


status_t
generic_vm_physical_page_mapper_init_post_area(kernel_args *args)
{
	void *temp;

	TRACE(("generic_vm_physical_page_mapper_init_post_area: entry\n"));

	temp = (void *)paddr_desc;
	create_area("physical_page_mapping_descriptors", &temp, B_EXACT_ADDRESS,
		ROUNDUP(sizeof(paddr_chunk_desc) * 1024, B_PAGE_SIZE),
		B_ALREADY_WIRED, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);

	temp = (void *)virtual_pmappings;
	create_area("iospace_virtual_chunk_descriptors", &temp, B_EXACT_ADDRESS,
		ROUNDUP(sizeof(paddr_chunk_desc *) * num_virtual_chunks, B_PAGE_SIZE),
		B_ALREADY_WIRED, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
304 TRACE(("generic_vm_physical_page_mapper_init_post_area: creating iospace\n"));
305 temp
= (void *)sIOSpaceBase
;
306 area_id ioSpaceArea
= vm_create_null_area(VMAddressSpace::KernelID(),
307 "iospace", &temp
, B_EXACT_ADDRESS
, sIOSpaceSize
,
308 CREATE_AREA_PRIORITY_VIP
);
309 if (ioSpaceArea
< 0) {
310 panic("generic_vm_physical_page_mapper_init_post_area(): Failed to "
311 "create null area for IO space!\n");
315 TRACE(("generic_vm_physical_page_mapper_init_post_area: done\n"));

#ifdef DEBUG_IO_SPACE
	add_debugger_command("iospace", &dump_iospace,
		"Shows info about the I/O space area.");
#endif

	return B_OK;
}


status_t
generic_vm_physical_page_mapper_init_post_sem(kernel_args *args)
{
	sChunkAvailableSem = create_sem(1, "iospace chunk available");

	return sChunkAvailableSem >= B_OK ? B_OK : sChunkAvailableSem;
}