/*	$NetBSD: loadfile_machdep.c,v 1.6 2008/08/25 22:31:12 martin Exp $	*/

/*-
 * Copyright (c) 2005 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This work is based on the code contributed by Robert Drehmel to the
 * FreeBSD project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <lib/libsa/stand.h>

#include <machine/pte.h>
#include <machine/cpu.h>
#include <machine/ctlreg.h>
#include <machine/vmparam.h>
#include <machine/promlib.h>
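
/*
 * hi()/lo() extract the high and low 32-bit halves of a 64-bit value,
 * as expected by the TLB helper routines declared below; roundup2(x, y)
 * rounds x up to the next multiple of the power-of-two y.
 */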
#define hi(val)		((uint32_t)(((val) >> 32) & (uint32_t)-1))
#define lo(val)		((uint32_t)((val) & (uint32_t)-1))

#define roundup2(x, y)	(((x)+((y)-1))&(~((y)-1)))

typedef int phandle_t;
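
/*
 * External helpers: itlb_enter()/dtlb_enter() install a locked TLB entry
 * and dtlb_replace() updates one, each taking the 64-bit TTE data as two
 * 32-bit halves; the *_va_to_pa() probes return (vaddr_t)-1 when no
 * translation exists, as the callers in mmu_mapin() rely on.
 */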
extern void	itlb_enter(vaddr_t, uint32_t, uint32_t);
extern void	dtlb_enter(vaddr_t, uint32_t, uint32_t);
extern void	dtlb_replace(vaddr_t, uint32_t, uint32_t);
extern vaddr_t	itlb_va_to_pa(vaddr_t);
extern vaddr_t	dtlb_va_to_pa(vaddr_t);

static void	tlb_init(void);

static int	mmu_mapin(vaddr_t, vsize_t);
static ssize_t	mmu_read(int, void *, size_t);
static void*	mmu_memcpy(void *, const void *, size_t);
static void*	mmu_memset(void *, int, size_t);
static void	mmu_freeall(void);

static int	ofw_mapin(vaddr_t, vsize_t);
static ssize_t	ofw_read(int, void *, size_t);
static void*	ofw_memcpy(void *, const void *, size_t);
static void*	ofw_memset(void *, int, size_t);
static void	ofw_freeall(void);

static int	nop_mapin(vaddr_t, vsize_t);
static ssize_t	nop_read(int, void *, size_t);
static void*	nop_memcpy(void *, const void *, size_t);
static void*	nop_memset(void *, int, size_t);
static void	nop_freeall(void);

struct tlb_entry *dtlb_store = 0;
struct tlb_entry *itlb_store = 0;
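
/*
 * Locked TLB slot bookkeeping: number of slots currently in use and the
 * per-CPU limits reported by the PROM.  Declared as plain ints here to
 * match the way tlb_init() and the _prom_getprop() calls fill them in.
 */
int dtlb_slot;
int itlb_slot;
int dtlb_slot_max;
int itlb_slot_max;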

static struct kvamap {
        uint64_t start;
        uint64_t end;
} kvamap[MAXSEGNUM];

static struct memsw {
        ssize_t (* read)(int f, void *addr, size_t size);
        void*   (* memcpy)(void *dst, const void *src, size_t size);
        void*   (* memset)(void *dst, int c, size_t size);
        void    (* freeall)(void);
} memswa[] = {
        { nop_read, nop_memcpy, nop_memset, nop_freeall },
        { ofw_read, ofw_memcpy, ofw_memset, ofw_freeall },
        { mmu_read, mmu_memcpy, mmu_memset, mmu_freeall }
};

static struct memsw *memsw = &memswa[0];
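
/*
 * The rows of memswa[] are selected by index via loadfile_set_allocator()
 * below, so their order (nop, ofw, mmu) is meant to line up with the
 * LOADFILE_NOP/OFW/MMU_ALLOCATOR selectors listed in its comment.
 */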

/*
 * Check if a memory region is already mapped. Return length and virtual
 * address of unmapped sub-region, if any.
 */
static uint64_t
kvamap_extract(vaddr_t va, vsize_t len, vaddr_t *new_va)
{
        int i;

        *new_va = va;
        for (i = 0; (len > 0) && (i < MAXSEGNUM); i++) {
                if (kvamap[i].start == NULL)
                        break;
                if ((kvamap[i].start <= va) && (va < kvamap[i].end)) {
                        uint64_t va_len = kvamap[i].end - va + kvamap[i].start;
                        len = (va_len < len) ? len - va_len : 0;
                        *new_va = kvamap[i].end;
                }
        }

        return len;
}

/*
 * Record new kernel mapping.
 */
static void
kvamap_enter(uint64_t va, uint64_t len)
{
        int i;

        DPRINTF(("kvamap_enter: %d@%p\n", (int)len, (void*)(u_long)va));
        for (i = 0; (len > 0) && (i < MAXSEGNUM); i++) {
                if (kvamap[i].start == NULL) {
                        kvamap[i].start = va;
                        kvamap[i].end = va + len;
                        break;
                }
        }

        if (i == MAXSEGNUM) {
                panic("Too many allocations requested.");
        }
}

/*
 * Initialize TLB as required by MMU mapping functions.
 */
static void
tlb_init(void)
{
        phandle_t root;
        phandle_t child;
        u_int bootcpu;
        u_int cpu;
        char buf[64];

        if (dtlb_store != NULL) {
                return;
        }

        bootcpu = get_cpuid();

        if ( (root = prom_findroot()) == -1) {
                panic("tlb_init: prom_findroot()");
        }

        for (child = prom_firstchild(root); child != 0;
             child = prom_nextsibling(child)) {
                if (child == -1) {
                        panic("tlb_init: OF_child");
                }
                if (_prom_getprop(child, "device_type", buf, sizeof(buf)) > 0 &&
                    strcmp(buf, "cpu") == 0) {
                        if (_prom_getprop(child, "upa-portid", &cpu,
                            sizeof(cpu)) == -1 && _prom_getprop(child, "portid",
                            &cpu, sizeof(cpu)) == -1)
                                panic("tlb_init: prom_getprop");
                        if (cpu == bootcpu)
                                break;
                }
        }
        if (cpu != bootcpu)
                panic("tlb_init: no node for bootcpu?!?!");

        if (_prom_getprop(child, "#dtlb-entries", &dtlb_slot_max,
            sizeof(dtlb_slot_max)) == -1 ||
            _prom_getprop(child, "#itlb-entries", &itlb_slot_max,
            sizeof(itlb_slot_max)) == -1)
                panic("tlb_init: prom_getprop");
        dtlb_store = alloc(dtlb_slot_max * sizeof(*dtlb_store));
        itlb_store = alloc(itlb_slot_max * sizeof(*itlb_store));
        if (dtlb_store == NULL || itlb_store == NULL) {
                panic("tlb_init: malloc");
        }

        dtlb_slot = itlb_slot = 0;
}

/*
 * Map requested memory region with permanent 4MB pages.
 */
static int
mmu_mapin(vaddr_t rva, vsize_t len)
{
        uint64_t data;
        paddr_t pa;
        vaddr_t va, mva;

        len  = roundup2(len + (rva & PAGE_MASK_4M), PAGE_SIZE_4M);
        rva &= ~PAGE_MASK_4M;

        tlb_init();

        for (pa = (paddr_t)-1; len > 0; rva = va) {
                if ( (len = kvamap_extract(rva, len, &va)) == 0) {
                        /* The rest is already mapped */
                        break;
                }

                if (dtlb_va_to_pa(va) == (u_long)-1 ||
                    itlb_va_to_pa(va) == (u_long)-1) {
                        /* Allocate a physical page, claim the virtual area */
                        if (pa == (paddr_t)-1) {
                                pa = OF_alloc_phys(PAGE_SIZE_4M, PAGE_SIZE_4M);
                                if (pa == (paddr_t)-1)
                                        panic("out of memory");
                                mva = OF_claim_virt(va, PAGE_SIZE_4M);
                                if (mva != va) {
                                        panic("can't claim virtual page "
                                            "(wanted %#lx, got %#lx)",
                                            va, mva);
                                }
                                /* The mappings may have changed, be paranoid. */
                                continue;
                        }

                        /*
                         * Actually, we can only allocate two pages less at
                         * most (depending on the kernel TSB size).
                         */
                        if (dtlb_slot >= dtlb_slot_max)
                                panic("mmu_mapin: out of dtlb_slots");
                        if (itlb_slot >= itlb_slot_max)
                                panic("mmu_mapin: out of itlb_slots");

                        DPRINTF(("mmu_mapin: %p:%p.%p\n", va, hi(pa), lo(pa)));

                        data = TSB_DATA(0,              /* global */
                                        PGSZ_4M,        /* 4mb page */
                                        pa,             /* phys.address */
                                        1,              /* privileged */
                                        1,              /* write */
                                        1,              /* cache */
                                        1,              /* alias */
                                        1,              /* valid */
                                        0               /* endianness */
                                        );
                        data |= TLB_L | TLB_CV;         /* locked, virt.cache */

                        dtlb_store[dtlb_slot].te_pa = pa;
                        dtlb_store[dtlb_slot].te_va = va;
                        dtlb_slot++;
                        dtlb_enter(va, hi(data), lo(data));
                        pa = (paddr_t)-1;
                }

                kvamap_enter(va, PAGE_SIZE_4M);

                va  += PAGE_SIZE_4M;
                len -= len > PAGE_SIZE_4M ? PAGE_SIZE_4M : len;
        }

        if (pa != (paddr_t)-1) {
                OF_free_phys(pa, PAGE_SIZE_4M);
        }

        return 0;
}
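
/*
 * The mmu_* helpers below make sure the destination region is backed by
 * locked 4MB mappings (via mmu_mapin()) before the actual read/copy/set
 * touches it.
 */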
static ssize_t
mmu_read(int f, void *addr, size_t size)
{
        mmu_mapin((vaddr_t)addr, size);
        return read(f, addr, size);
}

static void*
mmu_memcpy(void *dst, const void *src, size_t size)
{
        mmu_mapin((vaddr_t)dst, size);
        return memcpy(dst, src, size);
}

static void*
mmu_memset(void *dst, int c, size_t size)
{
        mmu_mapin((vaddr_t)dst, size);
        return memset(dst, c, size);
}
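
/*
 * Forget every region recorded in kvamap and reset the TLB slot counters;
 * the mappings themselves are left in place (see the XXX note below).
 */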
static void
mmu_freeall(void)
{
        int i;

        dtlb_slot = itlb_slot = 0;
        for (i = 0; i < MAXSEGNUM; i++) {
                /* XXX return all mappings to PROM and unmap the pages! */
                kvamap[i].start = kvamap[i].end = 0;
        }
}

/*
 * Claim requested memory region in OpenFirmware allocation pool.
 */
static int
ofw_mapin(vaddr_t rva, vsize_t len)
{
        vaddr_t va;

        len  = roundup2(len + (rva & PAGE_MASK_4M), PAGE_SIZE_4M);
        rva &= ~PAGE_MASK_4M;

        if ( (len = kvamap_extract(rva, len, &va)) != 0) {
                if (OF_claim((void *)(long)va, len, PAGE_SIZE_4M) == (void*)-1){
                        panic("ofw_mapin: Cannot claim memory.");
                }
                kvamap_enter(va, len);
        }

        return 0;
}
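
/*
 * The ofw_* helpers below claim the destination region from OpenFirmware
 * (via ofw_mapin()) before reading or copying into it.
 */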
static ssize_t
ofw_read(int f, void *addr, size_t size)
{
        ofw_mapin((vaddr_t)addr, size);
        return read(f, addr, size);
}

static void*
ofw_memcpy(void *dst, const void *src, size_t size)
{
        ofw_mapin((vaddr_t)dst, size);
        return memcpy(dst, src, size);
}

static void*
ofw_memset(void *dst, int c, size_t size)
{
        ofw_mapin((vaddr_t)dst, size);
        return memset(dst, c, size);
}
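
/*
 * Return every region recorded in kvamap to OpenFirmware and clear the
 * bookkeeping.
 */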
static void
ofw_freeall(void)
{
        int i;

        dtlb_slot = itlb_slot = 0;
        for (i = 0; i < MAXSEGNUM; i++) {
                OF_release((void*)(u_long)kvamap[i].start,
                    (u_int)(kvamap[i].end - kvamap[i].start));
                kvamap[i].start = kvamap[i].end = 0;
        }
}

/*
 * NOP implementation exists solely for kernel header loading sake. Here
 * we use alloc() interface to allocate memory and avoid doing some dangerous
 * things.
 */
static ssize_t
nop_read(int f, void *addr, size_t size)
{
        return read(f, addr, size);
}

static void*
nop_memcpy(void *dst, const void *src, size_t size)
{
        /*
         * Real NOP to make LOAD_HDR work: loadfile_elfXX copies ELF headers
         * right after the highest kernel address which will not be mapped with
         * nop_XXX operations.
         */
        return dst;
}

static void*
nop_memset(void *dst, int c, size_t size)
{
        return memset(dst, c, size);
}
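
/*
 * Entry points used by the loadfile() machinery; each call is dispatched
 * through whichever allocator memsw currently points at.
 */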
ssize_t
sparc64_read(int f, void *addr, size_t size)
{
        return (*memsw->read)(f, addr, size);
}

void*
sparc64_memcpy(void *dst, const void *src, size_t size)
{
        return (*memsw->memcpy)(dst, src, size);
}

void*
sparc64_memset(void *dst, int c, size_t size)
{
        return (*memsw->memset)(dst, c, size);
}

/*
 * Remove write permissions from text mappings in the dTLB.
 * Add entries in the iTLB.
 */
void
sparc64_finalize_tlb(u_long data_va)
{
        int i;
        uint64_t data;
        bool writable_text = false;

        for (i = 0; i < dtlb_slot; i++) {
                if (dtlb_store[i].te_va >= data_va) {
                        /*
                         * If (for whatever reason) the start of the
                         * writable section is right at the start of
                         * the kernel, we need to map it into the ITLB
                         * nevertheless (and don't make it readonly).
                         */
                        if (i == 0 && dtlb_store[i].te_va == data_va)
                                writable_text = true;
                        else
                                continue;
                }

                data = TSB_DATA(0,              /* global */
                                PGSZ_4M,        /* 4mb page */
                                dtlb_store[i].te_pa,    /* phys.address */
                                1,              /* privileged */
                                writable_text ? 1 : 0,  /* write */
                                1,              /* cache */
                                1,              /* alias */
                                1,              /* valid */
                                0               /* endianness */
                                );
                data |= TLB_L | TLB_CV;         /* locked, virt.cache */

                dtlb_replace(dtlb_store[i].te_va, hi(data), lo(data));
                itlb_store[itlb_slot] = dtlb_store[i];
                itlb_slot++;
                itlb_enter(dtlb_store[i].te_va, hi(data), lo(data));
        }
        if (writable_text)
                printf("WARNING: kernel text mapped writable!\n");
}

/*
 * Record kernel mappings in bootinfo structure.
 */
void
sparc64_bi_add(void)
{
        int i;
        int itlb_size, dtlb_size;
        struct btinfo_count bi_count;
        struct btinfo_tlb *bi_itlb, *bi_dtlb;

        bi_count.count = itlb_slot;
        bi_add(&bi_count, BTINFO_ITLB_SLOTS, sizeof(bi_count));
        bi_count.count = dtlb_slot;
        bi_add(&bi_count, BTINFO_DTLB_SLOTS, sizeof(bi_count));

        itlb_size = sizeof(*bi_itlb) + sizeof(struct tlb_entry) * itlb_slot;
        dtlb_size = sizeof(*bi_dtlb) + sizeof(struct tlb_entry) * dtlb_slot;

        bi_itlb = alloc(itlb_size);
        bi_dtlb = alloc(dtlb_size);

        if ((bi_itlb == NULL) || (bi_dtlb == NULL)) {
                panic("Out of memory in sparc64_bi_add.\n");
        }

        for (i = 0; i < itlb_slot; i++) {
                bi_itlb->tlb[i].te_va = itlb_store[i].te_va;
                bi_itlb->tlb[i].te_pa = itlb_store[i].te_pa;
        }
        bi_add(bi_itlb, BTINFO_ITLB, itlb_size);

        for (i = 0; i < dtlb_slot; i++) {
                bi_dtlb->tlb[i].te_va = dtlb_store[i].te_va;
                bi_dtlb->tlb[i].te_pa = dtlb_store[i].te_pa;
        }
        bi_add(bi_dtlb, BTINFO_DTLB, dtlb_size);
}

/*
 * Choose kernel image mapping strategy:
 *
 * LOADFILE_NOP_ALLOCATOR	To load kernel image headers
 * LOADFILE_OFW_ALLOCATOR	To map the kernel by OpenFirmware means
 * LOADFILE_MMU_ALLOCATOR	To use permanent 4MB mappings
 */
void
loadfile_set_allocator(int type)
{
        if (type >= (sizeof(memswa) / sizeof(struct memsw))) {
                panic("Bad allocator request.\n");
        }

        /*
         * Release all memory claimed by previous allocator and schedule
         * another allocator for succeeding memory allocation calls.
         */
        (*memsw->freeall)();
        memsw = &memswa[type];
}
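
/*
 * Typical use, as suggested by the comment above (the actual call sites
 * live in the boot program, not in this file): select LOADFILE_NOP_ALLOCATOR
 * while the kernel image headers are read, then switch to
 * LOADFILE_OFW_ALLOCATOR or LOADFILE_MMU_ALLOCATOR before the kernel
 * proper is mapped and loaded, e.g.
 *
 *	loadfile_set_allocator(LOADFILE_NOP_ALLOCATOR);
 *	... read and parse the kernel headers ...
 *	loadfile_set_allocator(LOADFILE_MMU_ALLOCATOR);
 *	... load the kernel image ...
 */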