/*	$NetBSD: oea_machdep.c,v 1.50 2009/11/26 00:19:20 matt Exp $	*/

/*
 * Copyright (C) 2002 Matt Thomas
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: oea_machdep.c,v 1.50 2009/11/26 00:19:20 matt Exp $");

#include "opt_ppcarch.h"
#include "opt_compat_netbsd.h"
#include "opt_ddb.h"
#include "opt_kgdb.h"
#include "opt_ipkdb.h"
#include "opt_multiprocessor.h"
#include "opt_altivec.h"

#include <sys/param.h>
#include <sys/buf.h>
#include <sys/exec.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mount.h>
#include <sys/msgbuf.h>
#include <sys/proc.h>
#include <sys/reboot.h>
#include <sys/syscallargs.h>
#include <sys/syslog.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/boot_flag.h>

#include <uvm/uvm_extern.h>

#include <net/netisr.h>

#ifdef DDB
#include <machine/db_machdep.h>
#include <ddb/db_extern.h>
#endif

#ifdef KGDB
#include <sys/kgdb.h>
#endif

#ifdef IPKDB
#include <ipkdb/ipkdb.h>
#endif

#include <powerpc/oea/bat.h>
#include <powerpc/oea/sr_601.h>
#include <powerpc/oea/cpufeat.h>
#include <powerpc/trap.h>
#include <powerpc/stdarg.h>
#include <powerpc/spr.h>
#include <powerpc/pte.h>
#include <powerpc/altivec.h>
#include <machine/powerpc.h>
char machine[] = MACHINE;		/* from <machine/param.h> */
char machine_arch[] = MACHINE_ARCH;	/* from <machine/param.h> */

struct vm_map *mb_map = NULL;
struct vm_map *phys_map = NULL;

/*
 * Global variables used here and there
 */
static void trap0(void *);
/* XXXSL: The battable is not initialized to non-zero for PPC_OEA64 and PPC_OEA64_BRIDGE */
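/*
 * battable has 512 slots so that it can be indexed by pa >> 23 for the
 * 601's 8 MB BAT entries; the 256 MB entries used on other CPUs are
 * indexed by pa >> 28.
 */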
struct bat battable[512];

register_t iosrtable[16];	/* I/O segments, for kernel_pmap setup */

paddr_t msgbuf_paddr;
void
oea_init(void (*handler)(void))
{
	extern int trapcode[], trapsize[];
	extern int sctrap[], scsize[];
	extern int alitrap[], alisize[];
	extern int dsitrap[], dsisize[];
	extern int trapstart[], trapend[];
#ifdef PPC_OEA601
	extern int dsi601trap[], dsi601size[];
#endif
	extern int decrint[], decrsize[];
	extern int tlbimiss[], tlbimsize[];
	extern int tlbdlmiss[], tlbdlmsize[];
	extern int tlbdsmiss[], tlbdsmsize[];
#if defined(DDB) || defined(KGDB)
	extern int ddblow[], ddbsize[];
#endif
#ifdef IPKDB
	extern int ipkdblow[], ipkdbsize[];
#endif
	uintptr_t exc, exc_base;
#if defined(ALTIVEC) || defined(PPC_OEA)
	register_t msr, scratch;
#endif
	unsigned int cpuvers;
	size_t size;
	struct cpu_info * const ci = &cpu_info[0];

#ifdef PPC_HIGH_VEC
	exc_base = EXC_HIGHVEC;
#else
	exc_base = 0;
#endif
	mtspr(SPR_SPRG0, ci);
	cpuvers = mfpvr() >> 16;
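	/* The high half of the PVR is the processor version, e.g. MPC601. */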
	/*
	 * Initialize proc0 and current pcb and pmap pointers.
	 */
	KASSERT(curcpu() == ci);

	curpcb = lwp_getpcb(&lwp0);
	memset(curpcb, 0, sizeof(struct pcb));
#ifdef ALTIVEC
	/*
	 * Initialize the vectors with NaNs
	 */
	for (scratch = 0; scratch < 32; scratch++) {
		curpcb->pcb_vr.vreg[scratch][0] = 0x7FFFDEAD;
		curpcb->pcb_vr.vreg[scratch][1] = 0x7FFFDEAD;
		curpcb->pcb_vr.vreg[scratch][2] = 0x7FFFDEAD;
		curpcb->pcb_vr.vreg[scratch][3] = 0x7FFFDEAD;
	}
	curpcb->pcb_vr.vscr = 0;
	curpcb->pcb_vr.vrsave = 0;
#endif
	curpm = curpcb->pcb_pm = pmap_kernel();
	/*
	 * Cause a PGM trap if we branch to 0.
	 *
	 * XXX GCC4.1 complains about memset on address zero, so
	 * don't use the builtin.
	 */

	/*
	 * Set up trap vectors.  Don't assume vectors are on 0x100.
	 */
	for (exc = exc_base; exc <= exc_base + EXC_LAST; exc += 0x100) {
		switch (exc - exc_base) {
		default:
			size = (size_t)trapsize;
			memcpy((void *)exc, trapcode, size);
			break;
#if 0
		case EXC_EXI:
			/*
			 * This one is (potentially) installed during autoconf
			 */
			break;
#endif
		case EXC_SC:
			size = (size_t)scsize;
			memcpy((void *)exc, sctrap, size);
			break;
		case EXC_ALI:
			size = (size_t)alisize;
			memcpy((void *)exc, alitrap, size);
			break;
		case EXC_DSI:
#ifdef PPC_OEA601
			if (cpuvers == MPC601) {
				size = (size_t)dsi601size;
				memcpy((void *)exc, dsi601trap, size);
				break;
			}
#endif /* PPC_OEA601 */
			if (oeacpufeat & OEACPU_NOBAT) {
				size = (size_t)alisize;
				memcpy((void *)exc, alitrap, size);
			} else {
				size = (size_t)dsisize;
				memcpy((void *)exc, dsitrap, size);
			}
			break;
		case EXC_DECR:
			size = (size_t)decrsize;
			memcpy((void *)exc, decrint, size);
			break;
		case EXC_IMISS:
			size = (size_t)tlbimsize;
			memcpy((void *)exc, tlbimiss, size);
			break;
		case EXC_DLMISS:
			size = (size_t)tlbdlmsize;
			memcpy((void *)exc, tlbdlmiss, size);
			break;
		case EXC_DSMISS:
			size = (size_t)tlbdsmsize;
			memcpy((void *)exc, tlbdsmiss, size);
			break;
		case EXC_PERF:
			size = (size_t)trapsize;
			memcpy((void *)exc, trapcode, size);
			memcpy((void *)(exc_base + EXC_VEC), trapcode, size);
			break;
#if defined(DDB) || defined(IPKDB) || defined(KGDB)
		case EXC_RUNMODETRC:
#ifdef PPC_OEA601
			if (cpuvers != MPC601) {
#endif
				size = (size_t)trapsize;
				memcpy((void *)exc, trapcode, size);
				break;
#ifdef PPC_OEA601
			}
			/* FALLTHROUGH */
#endif
		case EXC_PGM:
		case EXC_TRC:
		case EXC_BPT:
#if defined(DDB) || defined(KGDB)
			size = (size_t)ddbsize;
			memcpy((void *)exc, ddblow, size);
#if defined(IPKDB)
#error "cannot enable IPKDB with DDB or KGDB"
#endif
#else
			size = (size_t)ipkdbsize;
			memcpy((void *)exc, ipkdblow, size);
#endif
			break;
#endif /* DDB || IPKDB || KGDB */
		}
#if 0
		exc += roundup(size, 32);
#endif
	}
	/*
	 * Install a branch absolute to trap0 to force a panic.
	 */
	if ((uintptr_t)trap0 < 0x2000000) {
		*(uint32_t *) 0 = 0x7c6802a6;
		*(uint32_t *) 4 = 0x48000002 | (uintptr_t) trap0;
	}
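	/*
	 * (0x7c6802a6 is "mflr r3" and 0x48000002 is "ba", branch absolute;
	 * a stray jump to address 0 thus saves the caller's LR in r3 and
	 * branches to trap0, which panics with that return address.)
	 */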
	/*
	 * Get the cache sizes because install_extint calls __syncicache.
	 */
	cpu_probe_cache();

#define	MxSPR_MASK	0x7c1fffff
#define	MFSPR_MQ	0x7c0002a6
#define	MTSPR_MQ	0x7c0003a6
#define	MTSPR_IBAT0L	0x7c1183a6
#define	MTSPR_IBAT1L	0x7c1383a6
#define	NOP		0x60000000
#define	B		0x48000000
#define	TLBSYNC		0x7c00046c
#define	SYNC		0x7c0004ac

#ifdef ALTIVEC
#define	MFSPR_VRSAVE	0x7c0042a6
#define	MTSPR_VRSAVE	0x7c0043a6
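	/*
	 * (MxSPR_MASK clears the GPR operand field of an mfspr/mtspr
	 * encoding, so the comparisons below match no matter which
	 * register the compiler picked for the save/restore sequences.)
	 */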
	/*
	 * Try to set the VEC bit in the MSR.  If it doesn't get set, we are
	 * not on an AltiVec capable processor.
	 */
	__asm volatile (
	    "mfmsr %0; oris %1,%0,%2@h; mtmsr %1; isync; "
	    "mfmsr %1; mtmsr %0; isync"
	    :	"=r"(msr), "=r"(scratch)
	    :	"J"(PSL_VEC));

	/*
	 * If we aren't on an AltiVec capable processor, we need to zap any
	 * of the sequences where we save/restore the VRSAVE SPR into NOPs.
	 */
	if (scratch & PSL_VEC) {
		cpu_altivec = 1;
	} else {
		int *ip = trapstart;

		for (; ip < trapend; ip++) {
			if ((ip[0] & MxSPR_MASK) == MFSPR_VRSAVE) {
				ip[0] = NOP;	/* mfspr */
				ip[1] = NOP;	/* stw */
			} else if ((ip[0] & MxSPR_MASK) == MTSPR_VRSAVE) {
				ip[-1] = NOP;	/* lwz */
				ip[0] = NOP;	/* mtspr */
			}
		}
	}
#endif
	/* XXX It would seem like this code could be elided ifndef 601, but
	 * doing so breaks my power3 machine.
	 */

	/*
	 * If we aren't on an MPC601 processor, we need to zap any of the
	 * sequences where we save/restore the MQ SPR into NOPs, and skip
	 * over the sequences where we zap/restore the BAT registers on
	 * kernel exit/entry.
	 */
	if (cpuvers != MPC601) {
		int *ip = trapstart;

		for (; ip < trapend; ip++) {
			if ((ip[0] & MxSPR_MASK) == MFSPR_MQ) {
				ip[0] = NOP;	/* mfspr */
				ip[1] = NOP;	/* stw */
			} else if ((ip[0] & MxSPR_MASK) == MTSPR_MQ) {
				ip[-1] = NOP;	/* lwz */
				ip[0] = NOP;	/* mtspr */
			} else if ((ip[0] & MxSPR_MASK) == MTSPR_IBAT0L) {
				if ((ip[1] & MxSPR_MASK) == MTSPR_IBAT1L)
					ip[-1] = B | 0x14;	/* li */
				else
					ip[-4] = B | 0x24;	/* lis */
			}
		}
	}
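	/*
	 * (The 0x14/0x24 above are forward byte displacements: the
	 * patched-in branches simply jump over the 601-only BAT
	 * save/restore sequences instead of NOPing them out word by word.)
	 */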
	/*
	 * Sync the changed instructions.
	 */
	__syncicache((void *) trapstart,
	    (uintptr_t) trapend - (uintptr_t) trapstart);
#ifdef PPC_OEA601
	/*
	 * If we are on an MPC601 processor, we need to zap any tlbsync
	 * instructions into sync (the 601 predates tlbsync).  This differs
	 * from the above in examining all kernel text, as opposed to just
	 * the exception handling.  We sync the icache on every instruction
	 * found since there are only very few of them.
	 */
	if (cpuvers == MPC601) {
		extern int kernel_text[], etext[];
		int *ip;

		for (ip = kernel_text; ip < etext; ip++)
			if (*ip == TLBSYNC) {
				*ip = SYNC;
				__syncicache(ip, sizeof(*ip));
			}
	}
#endif /* PPC_OEA601 */
	/*
	 * Configure a PSL user mask matching this processor.
	 */
	cpu_psluserset = PSL_EE | PSL_PR | PSL_ME | PSL_IR | PSL_DR | PSL_RI;
	cpu_pslusermod = PSL_FP | PSL_FE0 | PSL_FE1 | PSL_LE | PSL_SE | PSL_BE;
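	/*
	 * (cpu_psluserset holds the PSL bits every user PSL must contain;
	 * cpu_pslusermod holds the bits a user program may change.)
	 */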
#ifdef PPC_OEA601
	if (cpuvers == MPC601) {
		cpu_psluserset &= PSL_601_MASK;
		cpu_pslusermod &= PSL_601_MASK;
	}
#endif /* PPC_OEA601 */
#ifdef ALTIVEC
	cpu_pslusermod |= PSL_VEC;
#endif
#ifdef PPC_HIGH_VEC
	cpu_psluserset |= PSL_IP;	/* XXX ok? */
#endif
	/*
	 * external interrupt handler install
	 */
	if (handler)
		oea_install_extint(handler);

	__syncicache((void *)exc_base, EXC_LAST + 0x100);

	/*
	 * Now enable translation (and machine checks/recoverable interrupts).
	 */
#ifdef PPC_OEA
	__asm volatile ("sync; mfmsr %0; ori %0,%0,%1; mtmsr %0; isync"
	    : "=r"(scratch)
	    : "K"(PSL_IR|PSL_DR|PSL_ME|PSL_RI));
#endif

	KASSERT(curcpu() == ci);
}
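/*
 * A rough sketch of how ports use this file (assumed from typical OEA
 * ports such as macppc, not stated here): early real-mode setup calls
 * oea_batinit() with its (pa, len) I/O pairs, then oea_init() with the
 * machine's external interrupt handler; cpu_startup() later calls
 * oea_startup().
 */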
#ifdef PPC_OEA601
void
mpc601_ioseg_add(paddr_t pa, register_t len)
{
	const u_int i = pa >> ADDR_SR_SHFT;

	if (len != BAT_BL_256M)
		panic("mpc601_ioseg_add: len != 256M");

	/*
	 * Translate into an I/O segment, load it, and stash away for use
	 * in pmap_bootstrap().
	 */
	iosrtable[i] = SR601(SR601_Ks, SR601_BUID_MEMFORCED, 0, i);
	__asm volatile ("mtsrin %0,%1"
	    ::	"r"(iosrtable[i]),
		"r"(pa));
}
#endif /* PPC_OEA601 */
#if defined (PPC_OEA) || defined (PPC_OEA64_BRIDGE)
void
oea_iobat_add(paddr_t pa, register_t len)
{
	static int n = 1;
	const u_int i = pa >> 28;

	battable[i].batl = BATL(pa, BAT_I|BAT_G, BAT_PP_RW);
	battable[i].batu = BATU(pa, len, BAT_Vs);

	/*
	 * Let's start loading the BAT registers.
	 */
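	/*
	 * (DBAT0 is reserved for physical memory, so I/O ranges get
	 * DBAT1..DBAT3 in the order they are registered.)
	 */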
	switch (n) {
	case 1:
		__asm volatile ("mtdbatl 1,%0; mtdbatu 1,%1;"
		    ::	"r"(battable[i].batl),
			"r"(battable[i].batu));
		n = 2;
		break;
	case 2:
		__asm volatile ("mtdbatl 2,%0; mtdbatu 2,%1;"
		    ::	"r"(battable[i].batl),
			"r"(battable[i].batu));
		n = 3;
		break;
	case 3:
		__asm volatile ("mtdbatl 3,%0; mtdbatu 3,%1;"
		    ::	"r"(battable[i].batl),
			"r"(battable[i].batu));
		n = 4;
		break;
	default:
		break;
	}
}
void
oea_iobat_remove(paddr_t pa)
{
	register_t batu;
	int i, n;

	n = pa >> ADDR_SR_SHFT;
	if (!BAT_VA_MATCH_P(battable[n].batu, pa) ||
	    !BAT_VALID_P(battable[n].batu, PSL_PR))
		return;
	battable[n].batl = 0;
	battable[n].batu = 0;
#define	BAT_RESET(n) \
	__asm volatile("mtdbatu %0,%1; mtdbatl %0,%1" :: "n"(n), "r"(0))
#define	BATU_GET(n, r)	__asm volatile("mfdbatu %0,%1" : "=r"(r) : "n"(n))
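	/*
	 * (The BAT number must be encoded in the instruction itself,
	 * hence the "n" constraints above, so the loop below dispatches
	 * through a switch instead of indexing the DBATs at run time.)
	 */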
	for (i = 1; i < 4; i++) {
		switch (i) {
		case 1:
			BATU_GET(1, batu);
			if (BAT_VA_MATCH_P(batu, pa) &&
			    BAT_VALID_P(batu, PSL_PR))
				BAT_RESET(1);
			break;
		case 2:
			BATU_GET(2, batu);
			if (BAT_VA_MATCH_P(batu, pa) &&
			    BAT_VALID_P(batu, PSL_PR))
				BAT_RESET(2);
			break;
		case 3:
			BATU_GET(3, batu);
			if (BAT_VA_MATCH_P(batu, pa) &&
			    BAT_VALID_P(batu, PSL_PR))
				BAT_RESET(3);
			break;
		default:
			break;
		}
	}
}
void
oea_batinit(paddr_t pa, ...)
{
	struct mem_region *allmem, *availmem, *mp;
	unsigned int cpuvers;
	register_t msr = mfmsr();
	va_list ap;

	cpuvers = mfpvr() >> 16;

	/*
	 * Initialize BAT registers to unmapped so we don't generate
	 * overlapping mappings below.
	 *
	 * The 601's implementation differs in the Valid bit being situated
	 * in the lower BAT register, and in being a unified BAT only whose
	 * four entries are accessed through the IBAT[0-3] SPRs.
	 *
	 * Also, while the 601 does distinguish between supervisor/user
	 * protection keys, it does _not_ distinguish between validity in
	 * supervisor/user mode.
	 */
	if ((msr & (PSL_IR|PSL_DR)) == 0) {
#ifdef PPC_OEA601
		if (cpuvers == MPC601) {
			__asm volatile ("mtibatl 0,%0" :: "r"(0));
			__asm volatile ("mtibatl 1,%0" :: "r"(0));
			__asm volatile ("mtibatl 2,%0" :: "r"(0));
			__asm volatile ("mtibatl 3,%0" :: "r"(0));
		} else
#endif /* PPC_OEA601 */
		{
			__asm volatile ("mtibatu 0,%0" :: "r"(0));
			__asm volatile ("mtibatu 1,%0" :: "r"(0));
			__asm volatile ("mtibatu 2,%0" :: "r"(0));
			__asm volatile ("mtibatu 3,%0" :: "r"(0));
			__asm volatile ("mtdbatu 0,%0" :: "r"(0));
			__asm volatile ("mtdbatu 1,%0" :: "r"(0));
			__asm volatile ("mtdbatu 2,%0" :: "r"(0));
			__asm volatile ("mtdbatu 3,%0" :: "r"(0));
		}
	}
	/*
	 * Set up BAT to map physical memory
	 */
#ifdef PPC_OEA601
	if (cpuvers == MPC601) {
		int i;

		/*
		 * Set up battable to map the lowest 256 MB area.
		 * Map the lowest 32 MB area via BAT[0-3];
		 * BAT[01] are fixed, BAT[23] are floating.
		 */
		for (i = 0; i < 32; i++) {
			battable[i].batl = BATL601(i << 23,
			    BAT601_BSM_8M, BAT601_V);
			battable[i].batu = BATU601(i << 23,
			    BAT601_M, BAT601_Ku, BAT601_PP_NONE);
		}
		__asm volatile ("mtibatu 0,%1; mtibatl 0,%0"
		    :: "r"(battable[0x00000000 >> 23].batl),
		       "r"(battable[0x00000000 >> 23].batu));
		__asm volatile ("mtibatu 1,%1; mtibatl 1,%0"
		    :: "r"(battable[0x00800000 >> 23].batl),
		       "r"(battable[0x00800000 >> 23].batu));
		__asm volatile ("mtibatu 2,%1; mtibatl 2,%0"
		    :: "r"(battable[0x01000000 >> 23].batl),
		       "r"(battable[0x01000000 >> 23].batu));
		__asm volatile ("mtibatu 3,%1; mtibatl 3,%0"
		    :: "r"(battable[0x01800000 >> 23].batl),
		       "r"(battable[0x01800000 >> 23].batu));
	} else
#endif /* PPC_OEA601 */
	{
		/*
		 * Set up BAT0 to only map the lowest 256 MB area
		 */
		battable[0].batl = BATL(0x00000000, BAT_M, BAT_PP_RW);
		battable[0].batu = BATU(0x00000000, BAT_BL_256M, BAT_Vs);

		__asm volatile ("mtibatl 0,%0; mtibatu 0,%1;"
		    "mtdbatl 0,%0; mtdbatu 0,%1;"
		    ::	"r"(battable[0].batl), "r"(battable[0].batu));
	}
	/*
	 * Now setup other fixed bat registers
	 *
	 * Note that we still run in real mode, and the BAT
	 * registers were cleared above.
	 */

	va_start(ap, pa);

	/*
	 * Add any I/O BATs specified;
	 * use I/O segments on the BAT-starved 601.
	 */
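	/*
	 * (The variadic list is (pa, len) pairs terminated by a zero pa;
	 * the first pa arrives as the named parameter.)
	 */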
#ifdef PPC_OEA601
	if (cpuvers == MPC601) {
		while (pa != 0) {
			register_t len = va_arg(ap, register_t);
			mpc601_ioseg_add(pa, len);
			pa = va_arg(ap, paddr_t);
		}
	} else
#endif
	{
		while (pa != 0) {
			register_t len = va_arg(ap, register_t);
			oea_iobat_add(pa, len);
			pa = va_arg(ap, paddr_t);
		}
	}

	va_end(ap);
	/*
	 * Set up battable to map all RAM regions.
	 * This is here because the mem_regions() call needs bat0 set up.
	 */
	mem_regions(&allmem, &availmem);
#ifdef PPC_OEA601
	if (cpuvers == MPC601) {
		for (mp = allmem; mp->size; mp++) {
			paddr_t paddr = mp->start & 0xff800000;
			paddr_t end = mp->start + mp->size;

			do {
				u_int ix = paddr >> 23;

				battable[ix].batl =
				    BATL601(paddr, BAT601_BSM_8M, BAT601_V);
				battable[ix].batu =
				    BATU601(paddr, BAT601_M, BAT601_Ku,
					BAT601_PP_NONE);
				paddr += (1 << 23);
			} while (paddr < end);
		}
	} else
#endif
	{
		for (mp = allmem; mp->size; mp++) {
			paddr_t paddr = mp->start & 0xf0000000;
			paddr_t end = mp->start + mp->size;

			do {
				u_int ix = paddr >> 28;

				battable[ix].batl =
				    BATL(paddr, BAT_M, BAT_PP_RW);
				battable[ix].batu =
				    BATU(paddr, BAT_BL_256M, BAT_Vs);
				paddr += SEGMENT_LENGTH;
			} while (paddr < end);
		}
	}
}
#endif /* PPC_OEA || PPC_OEA64_BRIDGE */
void
oea_install_extint(void (*handler)(void))
{
	extern int extint[], extsize[];
	extern int extint_call[];
	uintptr_t offset = (uintptr_t)handler - (uintptr_t)extint_call;
	int omsr, msr;

#ifdef	DIAGNOSTIC
	if (offset > 0x1ffffff)
		panic("install_extint: %p too far away (%#lx)", handler,
		    (unsigned long)offset);
#endif
	__asm volatile ("mfmsr %0; andi. %1,%0,%2; mtmsr %1"
	    :	"=r" (omsr), "=r" (msr)
	    :	"K" ((u_short)~PSL_EE));
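	/*
	 * (extint_call is a "bl" instruction in the exception stub; the
	 * next line splices the handler's address into its branch
	 * displacement field, with 0xfc000003 keeping the opcode and the
	 * AA/LK bits.)
	 */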
	extint_call[0] = (extint_call[0] & 0xfc000003) | offset;
	__syncicache((void *)extint_call, sizeof extint_call[0]);
#ifdef PPC_HIGH_VEC
	memcpy((void *)(EXC_HIGHVEC + EXC_EXI), extint, (size_t)extsize);
	__syncicache((void *)(EXC_HIGHVEC + EXC_EXI), (int)extsize);
#else
	memcpy((void *)EXC_EXI, extint, (size_t)extsize);
	__syncicache((void *)EXC_EXI, (int)extsize);
#endif
	__asm volatile ("mtmsr %0" :: "r"(omsr));
}
/*
 * Machine dependent startup code.
 */
void
oea_startup(const char *model)
{
	uintptr_t sz;
	void *v;
	vaddr_t minaddr, maxaddr;
	char pbuf[9];

	KASSERT(curcpu() != NULL);
	KASSERT(lwp0.l_cpu != NULL);
	KASSERT(curcpu()->ci_intstk != 0);
	KASSERT(curcpu()->ci_intrdepth == -1);
	sz = round_page(MSGBUFSIZE);
#ifdef MSGBUFADDR
	v = (void *) MSGBUFADDR;
#else
	/*
	 * If the msgbuf is not in segment 0, allocate KVA for it and access
	 * it via mapped pages.  [This prevents unneeded BAT switches.]
	 */
	v = (void *) msgbuf_paddr;
	if (msgbuf_paddr + sz > SEGMENT_LENGTH) {
		u_int i;

		minaddr = 0;
		if (uvm_map(kernel_map, &minaddr, sz,
		    NULL, UVM_UNKNOWN_OFFSET, 0,
		    UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE,
		    UVM_INH_NONE, UVM_ADV_NORMAL, 0)) != 0)
			panic("startup: cannot allocate VM for msgbuf");
		v = (void *)minaddr;
		for (i = 0; i < sz; i += PAGE_SIZE) {
			pmap_kenter_pa(minaddr + i, msgbuf_paddr + i,
			    VM_PROT_READ|VM_PROT_WRITE, 0);
		}
		pmap_update(pmap_kernel());
	}
#endif
	initmsgbuf(v, sz);
756 printf("%s%s", copyright
, version
);
758 printf("Model: %s\n", model
);
759 cpu_identify(NULL
, 0);
761 format_bytes(pbuf
, sizeof(pbuf
), ctob((u_int
)physmem
));
762 printf("total memory = %s\n", pbuf
);
	/*
	 * Allocate away the pages that map to 0xDEA[CDE]xxxx.  Do this after
	 * the bufpages are allocated in case they overlap since it's not
	 * fatal if we can't allocate these.
	 */
	if (KERNEL_SR == 13 || KERNEL2_SR == 14) {
		int error;

		minaddr = 0xDEAC0000;
		error = uvm_map(kernel_map, &minaddr, 0x30000,
		    NULL, UVM_UNKNOWN_OFFSET, 0,
		    UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
			UVM_ADV_NORMAL, UVM_FLAG_FIXED));
		if (error != 0 || minaddr != 0xDEAC0000)
			printf("oea_startup: failed to allocate DEAD "
			    "ZONE: error=%d\n", error);
	}
	minaddr = 0;

	/*
	 * Allocate a submap for physio
	 */
	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    VM_PHYS_SIZE, 0, false, NULL);
#ifndef PMAP_MAP_POOLPAGE
	/*
	 * No need to allocate an mbuf cluster submap.  Mbuf clusters
	 * are allocated via the pool allocator, and we use direct-mapped
	 * pool pages.
	 */
	mb_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    mclbytes*nmbclusters, VM_MAP_INTRSAFE, false, NULL);
#endif
	format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free));
	printf("avail memory = %s\n", pbuf);
}
/*
 * Crash dump handling.
 */
void
oea_dumpsys(void)
{
	printf("dumpsys: TBD\n");
}
/*
 * Convert kernel VA to physical address
 */
paddr_t
kvtop(void *addr)
{
	vaddr_t va;
	paddr_t pa;
	uintptr_t off;
	extern char end[];

	if (addr < (void *)end)
		return (paddr_t)addr;

	va = trunc_page((vaddr_t)addr);
	off = (uintptr_t)addr - va;

	if (pmap_extract(pmap_kernel(), va, &pa) == false) {
		/*printf("kvtop: zero page frame (va=0x%x)\n", addr);*/
		return (paddr_t)addr;
	}

	return (pa + off);
}
/*
 * Allocate vm space and mapin the I/O address
 */
void *
mapiodev(paddr_t pa, psize_t len)
{
	paddr_t faddr;
	vaddr_t taddr, va;
	int off;

	faddr = trunc_page(pa);
	off = pa - faddr;
	len = round_page(off + len);
	va = taddr = uvm_km_alloc(kernel_map, len, 0, UVM_KMF_VAONLY);

	if (va == 0)
		return NULL;

	for (; len > 0; len -= PAGE_SIZE) {
		pmap_kenter_pa(taddr, faddr, VM_PROT_READ | VM_PROT_WRITE, 0);
		faddr += PAGE_SIZE;
		taddr += PAGE_SIZE;
	}
	pmap_update(pmap_kernel());
	return (void *)(va + off);
}
void
unmapiodev(vaddr_t va, vsize_t len)
{
	vaddr_t faddr;

	faddr = trunc_page(va);
	len = round_page(va - faddr + len);

	pmap_kremove(faddr, len);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, faddr, len, UVM_KMF_VAONLY);
}
884 panic("call to null-ptr from %p", lr
);