/*	$NetBSD: vmem.c,v 1.6 2007/03/04 05:59:53 christos Exp $	*/

/*-
 * Copyright (c) 1999 Shin Takemura.
 * All rights reserved.
 *
 * This software is part of the PocketBSD.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the PocketBSD project
 *	and its contributors.
 * 4. Neither the name of the project nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <pbsdboot.h>

struct addr_s {
	unsigned char *addr;
	int in_use;
};

struct page_header_s {
	unsigned long magic0;
	int pageno;
	unsigned long magic1;
};

struct map_s *map = NULL;
struct addr_s *phys_addrs = NULL;
unsigned char *heap = NULL;
int npages;
void *kernel_start;
void *kernel_end;
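
/*
 * vmem_exec:
 *	Convert the kernel argument vector, every page in the two-level
 *	page map and the map itself to physical addresses, then jump to
 *	the boot stub through system_info.si_boot.
 */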
int
vmem_exec(void *entry, int argc, char *argv[], struct bootinfo *bi)
{
	int i;
	void *p;

	if (map == NULL) {
		debug_printf(TEXT("vmem is not initialized.\n"));
		msg_printf(MSG_ERROR, whoami, TEXT("vmem is not initialized.\n"));
		return (-1);
	}

	debug_printf(TEXT("entry point=0x%x\n"), entry);

	map->base = kernel_start;

	for (i = 0; i < argc; i++) {
		argv[i] = vtophysaddr(argv[i]);
	}

	map->arg0 = (void *)argc;
	map->arg1 = vtophysaddr((void *)argv);
	map->arg2 = vtophysaddr((void *)bi);

	if (map->arg1 == NULL || map->arg2 == NULL) {
		debug_printf(TEXT("arg, vtophysaddr() failed\n"));
		msg_printf(MSG_ERROR, whoami,
		    TEXT("arg, vtophysaddr() failed\n"));
		return (-1);
	}

	for (i = 0; p = map->leaf[i / map->leafsize][i % map->leafsize]; i++) {
		if ((p = vtophysaddr(p)) == NULL) {
			debug_printf(TEXT("vtophysaddr() failed, page %d (addr=0x%x) \n"),
			    i, map->leaf[i / map->leafsize][i % map->leafsize]);
			msg_printf(MSG_ERROR, whoami,
			    TEXT("vtophysaddr() failed, page %d (addr=0x%x) \n"),
			    i, map->leaf[i / map->leafsize][i % map->leafsize]);
			return (-1);
		}
		map->leaf[i / map->leafsize][i % map->leafsize] = p;
	}

	for (i = 0; i < map->nleaves; i++) {
		if ((p = vtophysaddr((void *)map->leaf[i])) == NULL) {
			debug_printf(TEXT("vtophysaddr() failed, leaf %d (addr=0x%x) \n"),
			    i, map->leaf[i / map->leafsize][i % map->leafsize]);
			msg_printf(MSG_ERROR, whoami,
			    TEXT("vtophysaddr() failed, leaf %d (addr=0x%x) \n"),
			    i, map->leaf[i / map->leafsize][i % map->leafsize]);
			return (-1);
		}
		map->leaf[i] = (void **)p;
	}

	debug_printf(TEXT("execute startprog()\n"));

	return ((*system_info.si_boot)(vtophysaddr((void *)map)));
}
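
/*
 * vmem_alloc:
 *	Hand out the next free heap page whose physical address does not
 *	fall inside the kernel image range [kernel_start, kernel_end).
 */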
void *
vmem_alloc()
{
	int i, pagesize;
	struct page_header_s *page;

	pagesize = system_info.si_pagesize;
	for (i = 0; i < npages; i++) {
		page = (struct page_header_s *)&heap[pagesize * i];
		if (!phys_addrs[i].in_use &&
		    !(kernel_start <= phys_addrs[i].addr &&
		    phys_addrs[i].addr < kernel_end)) {
			phys_addrs[i].in_use = 1;
			return ((void *)page);
		}
	}
	return (NULL);
}
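
/*
 * alloc_kpage:
 *	Allocate the heap page that is physically located at phys_addr;
 *	if no heap page has that address, fall back to vmem_alloc().
 */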
void *
alloc_kpage(void *phys_addr)
{
	int i, pagesize;
	struct page_header_s *page;

	pagesize = system_info.si_pagesize;
	for (i = 0; i < npages; i++) {
		page = (struct page_header_s *)&heap[pagesize * i];
		if (phys_addrs[i].addr == phys_addr) {
			if (phys_addrs[i].in_use) {
				debug_printf(TEXT("page %d (phys addr=0x%x) is already in use\n"),
				    i, phys_addr);
				msg_printf(MSG_ERROR, whoami,
				    TEXT("page %d (phys addr=0x%x) is already in use\n"),
				    i, phys_addr);
				return (NULL);
			}
			phys_addrs[i].in_use = 1;
			return ((void *)page);
		}
	}
	return (vmem_alloc());
}
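
/*
 * vmem_get:
 *	Return the heap page (plus offset) that holds the data destined
 *	for the kernel physical address phys_addr; if length is non-NULL,
 *	return the number of bytes left in that page through it.
 */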
void *
vmem_get(void *phys_addr, int *length)
{
	int pagesize = system_info.si_pagesize;
	int pageno = (phys_addr - kernel_start) / pagesize;
	int offset = (phys_addr - kernel_start) % pagesize;

	if (map == NULL || pageno < 0 || npages <= pageno) {
		return (NULL);
	}
	if (length) {
		*length = pagesize - offset;
	}
	return (map->leaf[pageno / map->leafsize][pageno % map->leafsize] + offset);
}
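
/*
 * vtophysaddr:
 *	Translate an address inside the heap into the physical address
 *	found for that page during the memory scan in vmem_init().
 */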
void *
vtophysaddr(void *page)
{
	int pageno = (page - heap) / system_info.si_pagesize;
	int offset = (page - heap) % system_info.si_pagesize;

	if (map == NULL || pageno < 0 || npages <= pageno) {
		return (NULL);
	}
	return (phys_addrs[pageno].addr + offset);
}
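
/*
 * vmem_init:
 *	Prepare the relocation map for the kernel image placed between
 *	start and end: allocate a heap of pages, discover their physical
 *	addresses by scanning DRAM for the magic numbers stamped on each
 *	page, and build the root/leaf tables used by vmem_exec().
 */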
int
vmem_init(void *start, void *end)
{
#define MEM_BLOCK_SIZE	(1024*1024*4)	/* must be greater than page size */
	int i, m;
	unsigned long magic0;
	unsigned long magic1;
	int nfounds;
	struct page_header_s *page;
	unsigned char *mem;
	int pageno;
	int pagesize, memblocks;
	int nleaves;
	long size;

	pagesize = system_info.si_pagesize;
	memblocks = (system_info.si_drammaxsize) / MEM_BLOCK_SIZE;

	/* align with page size */
	start = (void *)(((long)start / pagesize) * pagesize);
	end = (void *)((((long)end + pagesize - 1) / pagesize) * pagesize);

	kernel_start = start;
	kernel_end = end;
	size = end - start;

	/*
	 * program image pages.
	 */
	npages = (size + pagesize - 1) / pagesize;

	/*
	 * npages plus one for end mark.
	 */
	npages += (nleaves = ((npages * sizeof(void *) + pagesize) / pagesize));

	/*
	 * map root page, startprg code page, argument page and bootinfo page.
	 */
	npages += 4;

	debug_printf(TEXT("allocate %d pages\n"), npages);
	heap = (unsigned char *)VirtualAlloc(0,
	    pagesize * npages, MEM_COMMIT,
	    PAGE_READWRITE | PAGE_NOCACHE);
	if (heap == NULL) {
		debug_printf(TEXT("can't allocate heap\n"));
		msg_printf(MSG_ERROR, whoami, TEXT("can't allocate heap\n"));
		goto error_cleanup;
	}

	/*
	 * allocate address table.
	 */
	phys_addrs = (struct addr_s *)VirtualAlloc(0,
	    npages * sizeof(struct addr_s),
	    MEM_COMMIT, PAGE_READWRITE);
	if (phys_addrs == NULL) {
		debug_printf(TEXT("can't allocate address table\n"));
		msg_printf(MSG_ERROR, whoami, TEXT("can't allocate address table\n"));
		goto error_cleanup;
	}

	/*
	 * set magic number for each page in buffer.
	 */
	debug_printf(TEXT("magic=%08x%08x\n"), magic0, magic1);

	for (i = 0; i < npages; i++) {
		page = (struct page_header_s *)&heap[pagesize * i];
		page->magic0 = magic0;
		page->pageno = i;
		page->magic1 = magic1;
		phys_addrs[i].addr = 0;
		phys_addrs[i].in_use = 0;
	}
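
	/*
	 * The heap was obtained from VirtualAlloc(), so the physical
	 * location of each heap page is not known yet.  The scan below
	 * maps DRAM block by block and looks for the magic-stamped
	 * headers written above to record where every page resides.
	 */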

	/*
	 * Scan whole physical memory.
	 */
	nfounds = 0;
	for (m = 0; (m < memblocks) && (nfounds < npages); m++) {

		/* Map physical memory block */
		mem = (unsigned char *)VirtualAlloc(0, MEM_BLOCK_SIZE,
		    MEM_RESERVE, PAGE_NOACCESS);
		if (!VirtualCopy((LPVOID)mem, (LPVOID)
		    ((system_info.si_dramstart + MEM_BLOCK_SIZE * m) >> 8),
		    MEM_BLOCK_SIZE,
		    PAGE_READWRITE | PAGE_NOCACHE | PAGE_PHYSICAL)) {
			VirtualFree(mem, 0, MEM_RELEASE);
			continue;
		}

		/* Find preliminary allocated pages */
		for (i = 0; i < (int)(MEM_BLOCK_SIZE / pagesize); i++) {
			page = (struct page_header_s *)&mem[pagesize * i];
			if (page->magic0 == magic0 &&
			    page->magic1 == magic1) {
				pageno = page->pageno;
				if (0 <= pageno && pageno < npages &&
				    phys_addrs[pageno].addr == 0) {
					/* Set kernel virtual addr. XXX mips dependent */
					phys_addrs[pageno].addr = (unsigned char *)
					    (0x80000000 | (MEM_BLOCK_SIZE * m +
					    system_info.si_dramstart) +
					    pagesize * i);
					if (npages <= ++nfounds) {
						break;
					}
				} else {
					debug_printf(TEXT("invalid page header\n"));
					msg_printf(MSG_ERROR, whoami, TEXT("invalid page header\n"));
					goto error_cleanup;
				}
			}
		}
		VirtualFree(mem, 0, MEM_RELEASE);
	}

	if (nfounds < npages) {
		debug_printf(TEXT("lost %d pages\n"), npages - nfounds);
		msg_printf(MSG_ERROR, whoami,
		    TEXT("lost %d pages (allocated %d pages)\n"),
		    npages - nfounds, npages);
		goto error_cleanup;
	}

	if ((map = (struct map_s *)vmem_alloc()) == NULL) {
		debug_printf(TEXT("can't allocate root page.\n"));
		msg_printf(MSG_ERROR, whoami, TEXT("can't allocate root page.\n"));
		goto error_cleanup;
	}
	map->nleaves = nleaves;
	map->leafsize = pagesize / sizeof(void *);
	map->pagesize = pagesize;

	/*
	 * allocate leaf pages
	 */
	for (i = 0; i < nleaves; i++) {
		if ((map->leaf[i] = (void **)vmem_alloc()) == NULL) {
			debug_printf(TEXT("can't allocate leaf page.\n"));
			msg_printf(MSG_ERROR, whoami, TEXT("can't allocate leaf page.\n"));
			goto error_cleanup;
		}
	}

	/*
	 * allocate kernel pages
	 */
	for (i = 0; start < kernel_end; start += pagesize, i++) {
		void **leaf = map->leaf[i / map->leafsize];
		if ((leaf[i % map->leafsize] = alloc_kpage(start)) == NULL) {
			debug_printf(TEXT("can't allocate page 0x%x.\n"), start);
			msg_printf(MSG_ERROR, whoami, TEXT("can't allocate page 0x%x.\n"), start);
			goto error_cleanup;
		}
	}
	map->leaf[i / map->leafsize][i % map->leafsize] = NULL;	/* END MARK */

	return (0);

 error_cleanup:
	if (heap != NULL) {
		VirtualFree(heap, 0, MEM_RELEASE);
		heap = NULL;
	}
	if (phys_addrs != NULL) {
		VirtualFree(phys_addrs, 0, MEM_RELEASE);
		phys_addrs = NULL;
	}

	return (-1);
}
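
/*
 * Dump the kernel page map for debugging: for every page of the kernel
 * range print the heap page and its physical address, marking pages that
 * already sit at their final physical address with "*".
 */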
void
vmem_dump_map()
{
	void *addr, *page, *paddr;

	if (map == NULL) {
		debug_printf(TEXT("no page map\n"));
		return;
	}

	for (addr = kernel_start; addr < kernel_end; addr += system_info.si_pagesize) {
		page = vmem_get(addr, NULL);
		paddr = vtophysaddr(page);
		debug_printf(TEXT("%08X: vaddr=%08X paddr=%08X %s\n"),
		    addr, page, paddr,
		    addr == paddr ? TEXT("*") : TEXT("reloc"));
	}
}