/*	$NetBSD: sun2.c,v 1.10 2009/01/12 07:00:59 tsutsui Exp $	*/

/*-
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Gordon W. Ross and Matthew Fredette.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Standalone functions specific to the Sun2.
 */
/* Need to avoid conflicts on these: */
#define get_pte sun2_get_pte
#define set_pte sun2_set_pte
#define get_segmap sun2_get_segmap
#define set_segmap sun2_set_segmap

/*
 * We need to get the sun2 NBSG definition, even if we're
 * building this with a different sun68k target.
 */
#include <arch/sun2/include/pmap.h>
#include <sys/param.h>
#include <machine/idprom.h>
#include <machine/mon.h>

#include <arch/sun2/include/pte.h>
#include <arch/sun2/sun2/control.h>

#include <arch/sun3/sun3/vme.h>
#define VME16_BASE MBIO_BASE
#define VME16_MASK MBIO_MASK

#include <arch/sun2/sun2/mbmem.h>
#include <arch/sun2/sun2/mbio.h>

#include "saio.h"	/* enum MAPTYPES */

#define OBIO_MASK 0xFFFFFF
u_int get_pte(vaddr_t);
void set_pte(vaddr_t, u_int);
void dvma2_init(void);
char * dvma2_alloc(int);
void dvma2_free(char *, int);
char * dvma2_mapin(char *, int);
void dvma2_mapout(char *, int);
char * dev2_mapin(int, u_long, int);
/* How to map each of the supported address-space types. */
struct mapinfo {
        int maptype;
        int pgtype;
        u_int base;
        u_int mask;
};

struct mapinfo
sun2_mapinfo[MAP__NTYPES] = {
        /* On-board memory, I/O */
        { MAP_MAINMEM,   PGT_OBMEM,   0,          ~0 },
        { MAP_OBIO,      PGT_OBIO,    0,          OBIO_MASK },
        /* Multibus memory, I/O */
        { MAP_MBMEM,     PGT_MBMEM,   MBMEM_BASE, MBMEM_MASK },
        { MAP_MBIO,      PGT_MBIO,    MBIO_BASE,  MBIO_MASK },
        /* VME A16 */
        { MAP_VME16A16D, PGT_VME_D16, VME16_BASE, VME16_MASK },
        { MAP_VME16A32D, 0, 0, 0 },
        /* VME A24 */
        { MAP_VME24A16D, 0, 0, 0 },
        { MAP_VME24A32D, 0, 0, 0 },
        /* VME A32 */
        { MAP_VME32A16D, 0, 0, 0 },
        { MAP_VME32A32D, 0, 0, 0 },
};

/* The virtual address we will use for PROM device mappings. */
int sun2_devmap = SUN3_MONSHORTSEG;
char *
dev2_mapin(int maptype, u_long physaddr, int length)
{
        u_int i, pa, pte, pgva, va;

        if ((sun2_devmap + length) > SUN3_MONSHORTPAGE)
                panic("dev2_mapin: length=%d", length);

        /* Find the mapinfo entry for this address-space type. */
        for (i = 0; i < MAP__NTYPES; i++)
                if (sun2_mapinfo[i].maptype == maptype)
                        goto found;
        panic("dev2_mapin: bad maptype");
found:

        if (physaddr & ~(sun2_mapinfo[i].mask))
                panic("dev2_mapin: bad address");
        pa = sun2_mapinfo[i].base += physaddr;

        pte = PA_PGNUM(pa) | PG_PERM |
            sun2_mapinfo[i].pgtype;

        /* Map the range, one page at a time, into the PROM short segment. */
        va = pgva = sun2_devmap;
        do {
                set_pte(pgva, pte);
                pgva += NBPG;
                pte += 1;
                length -= NBPG;
        } while (length > 0);
        sun2_devmap = pgva;
        va += (physaddr & PGOFSET);

#ifdef DEBUG_PROM
        printf("dev2_mapin: va=0x%x pte=0x%x\n",
            va, get_pte(va));
#endif
        return ((char *)va);
}
/*****************************************************************
 * DVMA support
 */

/*
 * The easiest way to deal with the need for DVMA mappings is to
 * create a DVMA alias mapping of the entire address range used by
 * the boot program.  That way, dvma_mapin can just compute the
 * DVMA alias address, and dvma_mapout does nothing.
 *
 * Note that this assumes that standalone programs will do I/O
 * operations only within the checked range (SA_MIN_VA .. SA_MAX_VA).
 */

#define DVMA_BASE	0x00f00000
#define DVMA_MAPLEN	0x38000		/* 256K - 32K (save MONSHORTSEG) */

#define SA_MIN_VA	0x220000
#define SA_MAX_VA	(SA_MIN_VA + DVMA_MAPLEN)
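/*
 * Worked example (illustrative only): a boot-program buffer at virtual
 * 0x230000 lies 0x10000 above SA_MIN_VA, so its DVMA alias is
 * DVMA_BASE + 0x10000 = 0x00f10000.  dvma2_mapin() below performs
 * exactly this offset computation, and dvma2_mapout() only checks that
 * the alias falls within DVMA_BASE .. DVMA_BASE + DVMA_MAPLEN.
 */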
/* This points to the end of the free DVMA space. */
u_int dvma2_end = DVMA_BASE + DVMA_MAPLEN;

void
dvma2_init(void)
{
        int segva, dmava, sme;

        segva = SA_MIN_VA;
        dmava = DVMA_BASE;

        /* Alias every boot-program segment into the DVMA window. */
        while (segva < SA_MAX_VA) {
                sme = get_segmap(segva);
                set_segmap(dmava, sme);
                segva += NBSG;
                dmava += NBSG;
        }
}
/* Convert a local address to a DVMA address. */
char *
dvma2_mapin(char *addr, int len)
{
        int va = (int)addr;

        /* Make sure the address is in the DVMA map. */
        if ((va < SA_MIN_VA) || (va >= SA_MAX_VA))
                panic("dvma2_mapin: 0x%x outside 0x%x..0x%x",
                    va, SA_MIN_VA, SA_MAX_VA);

        /* Compute the DVMA alias of this address. */
        va -= SA_MIN_VA;
        va += DVMA_BASE;

        return ((char *)va);
}
/* Destroy a DVMA address alias. */
void
dvma2_mapout(char *addr, int len)
{
        int va = (int)addr;

        /* Make sure the address is in the DVMA map. */
        if ((va < DVMA_BASE) || (va >= (DVMA_BASE + DVMA_MAPLEN)))
                panic("dvma2_mapout");
}
char *
dvma2_alloc(int len)
{
        /* Carve whole pages off the end of the free DVMA space. */
        len = m68k_round_page(len);
        dvma2_end -= len;
        return ((char *)dvma2_end);
}
void
dvma2_free(char *dvma, int len)
{
        /* not worth the trouble */
}
/*****************************************************************
 * Control space stuff...
 */
u_int
get_pte(vaddr_t va)
{
        u_int pte;

        pte = get_control_word(CONTROL_ADDR_BUILD(PGMAP_BASE, va));
        if (pte & PG_VALID) {
                /*
                 * This clears bit 30 (the kernel readable bit, which
                 * should always be set), bit 28 (which should always
                 * be set) and bit 26 (the user writable bit, which we
                 * always have tracking the kernel writable bit).  In
                 * the protection, this leaves bit 29 (the kernel
                 * writable bit) and bit 27 (the user readable bit).
                 * See pte2.h for more about this hack.
                 */
                pte &= ~(0x54000000);

                /*
                 * Flip bit 27 (the user readable bit) to become bit
                 * 27 (the PG_SYSTEM bit).
                 */
                pte ^= (PG_SYSTEM);
        }
        return (pte);
}
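/*
 * Worked example of the hack in get_pte() (illustrative only; the real
 * PG_* values live in pte2.h): for a valid hardware PTE with protection
 * bits 30..26 all set (0x7c000000 in those positions), the mask clears
 * bits 30, 28 and 26 (0x54000000), leaving bits 29 and 27 (0x28000000).
 * XORing with PG_SYSTEM (bit 27) then clears bit 27, so the resulting
 * software PTE describes a writable page that is not PG_SYSTEM, i.e.
 * one that was user readable in the hardware encoding.
 */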
void
set_pte(vaddr_t va, u_int pte)
{
        if (pte & PG_VALID) {
                /* Clear bit 26 (the user writable bit). */
                pte &= (~0x04000000);

                /*
                 * Flip bit 27 (the PG_SYSTEM bit) to become bit 27
                 * (the user readable bit).
                 */
                pte ^= (PG_SYSTEM);

                /*
                 * Always set bit 30 (the kernel readable bit) and
                 * bit 28, and set bit 26 (the user writable bit) iff
                 * bit 29 (the kernel writable bit) is set *and* bit
                 * 27 (the user readable bit) is set.  This latter bit
                 * of logic is expressed in the bizarre second term
                 * below, chosen because it needs no branches.
                 */
#if (PG_WRITE >> 2) != PG_SYSTEM
#error "PG_WRITE and PG_SYSTEM definitions don't match!"
#endif
                pte |= 0x50000000
                    | ((((pte & PG_WRITE) >> 2) & pte) >> 1);
        }
        set_control_word(CONTROL_ADDR_BUILD(PGMAP_BASE, va), pte);
}
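/*
 * Worked example of the branchless term in set_pte() (illustrative
 * only): when both bit 29 (PG_WRITE) and bit 27 are set in the
 * software PTE, (pte & PG_WRITE) >> 2 lands on bit 27, ANDing with
 * pte keeps it set, and the final >> 1 moves it down to bit 26, the
 * hardware user writable bit.  If either of bits 29 or 27 is clear,
 * the whole term is zero and bit 26 stays clear.
 */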
int
get_segmap(vaddr_t va)
{
        va = CONTROL_ADDR_BUILD(SEGMAP_BASE, va);
        return (get_control_byte(va));
}
void
set_segmap(vaddr_t va, int sme)
{
        va = CONTROL_ADDR_BUILD(SEGMAP_BASE, va);
        set_control_byte(va, sme);
}
/*
 * Copy the IDPROM contents into the passed buffer.
 * The caller (idprom.c) will do the checksum.
 */
void
sun2_getidprom(u_char *dst)
{
        vaddr_t src;    /* control space address */
        int len, x;

        src = IDPROM_BASE;
        len = sizeof(struct idprom);
        do {
                x = get_control_byte(src);
                src += NBPG;
                *dst++ = x;
        } while (--len > 0);
}
/*****************************************************************
 * Init our function pointers, etc.
 */
/*
 * For booting, the PROM in fredette's Sun 2/120 doesn't map
 * much main memory, and what is mapped is mapped strangely.
 * Low virtual memory is mapped like:
 *
 * 0x000000 - 0x0bffff virtual -> 0x000000 - 0x0bffff physical
 * 0x0c0000 - 0x0fffff virtual -> invalid
 * 0x100000 - 0x13ffff virtual -> 0x0c0000 - 0x0fffff physical
 * 0x200800 - 0x3fffff virtual -> 0x200800 - 0x3fffff physical
 *
 * I think the SunOS authors wanted to load kernels starting at
 * physical zero, and assumed that kernels would be less
 * than 768K (0x0c0000) long.  Also, the PROM maps physical
 * 0x0c0000 - 0x0fffff into DVMA space, so we can't take the
 * easy road and just add more mappings to use that physical
 * memory while loading (the PROM might do DMA there).
 *
 * What we do, then, is assume a 4MB machine (you'll really
 * need that to run NetBSD at all anyways), and we map two
 * chunks of physical and virtual space:
 *
 * 0x400000 - 0x4bffff virtual -> 0x000000 - 0x0bffff physical
 * 0x4c0000 - 0x5fffff virtual -> 0x2c0000 - 0x3fffff physical
 *
 * And then we load starting at virtual 0x400000.  We will do
 * all of this mapping just by copying PMEGs.
 *
 * After the load is done, but before we enter the kernel, we're
 * done with the PROM, so we copy the part of the kernel that
 * got loaded at physical 0x2c0000 down to physical 0x0c0000.
 * This can't just be a PMEG copy; we've actually got to move
 * bytes in physical memory.
 *
 * These two chunks of physical and virtual space are defined
 * in the macros below.  Some of the macros are only for completeness:
 */
#define MEM_CHUNK0_SIZE			(0x0c0000)
#define MEM_CHUNK0_LOAD_PHYS		(0x000000)
#define MEM_CHUNK0_LOAD_VIRT		(0x400000)
#define MEM_CHUNK0_LOAD_VIRT_PROM	MEM_CHUNK0_LOAD_PHYS
#define MEM_CHUNK0_COPY_PHYS		MEM_CHUNK0_LOAD_PHYS
#define MEM_CHUNK0_COPY_VIRT		MEM_CHUNK0_COPY_PHYS

#define MEM_CHUNK1_SIZE			(0x140000)
#define MEM_CHUNK1_LOAD_PHYS		(0x2c0000)
#define MEM_CHUNK1_LOAD_VIRT		(MEM_CHUNK0_LOAD_VIRT + MEM_CHUNK0_SIZE)
#define MEM_CHUNK1_LOAD_VIRT_PROM	MEM_CHUNK1_LOAD_PHYS
#define MEM_CHUNK1_COPY_PHYS		(MEM_CHUNK0_LOAD_PHYS + MEM_CHUNK0_SIZE)
#define MEM_CHUNK1_COPY_VIRT		MEM_CHUNK1_COPY_PHYS
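/*
 * Illustrative reading of the values above: chunk one is loaded at
 * virtual MEM_CHUNK1_LOAD_VIRT (0x4c0000), which the PROM backs with
 * physical MEM_CHUNK1_LOAD_PHYS (0x2c0000); after loading it is copied
 * down to MEM_CHUNK1_COPY_PHYS (0x0c0000), immediately above chunk
 * zero, so the kernel ends up contiguous from physical address zero.
 */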
/* Maps memory for loading. */
u_long
sun2_map_mem_load(void)
{
        vaddr_t off;

        /* Map chunk zero for loading. */
        for(off = 0; off < MEM_CHUNK0_SIZE; off += NBSG)
                set_segmap(MEM_CHUNK0_LOAD_VIRT + off,
                    get_segmap(MEM_CHUNK0_LOAD_VIRT_PROM + off));

        /* Map chunk one for loading. */
        for(off = 0; off < MEM_CHUNK1_SIZE; off += NBSG)
                set_segmap(MEM_CHUNK1_LOAD_VIRT + off,
                    get_segmap(MEM_CHUNK1_LOAD_VIRT_PROM + off));

        /* Tell our caller where in virtual space to load. */
        return MEM_CHUNK0_LOAD_VIRT;
}
/* Remaps memory for running. */
void *
sun2_map_mem_run(void *entry)
{
        vaddr_t off, off_end;
        int sme;
        u_int pte;

        /* Chunk zero is already mapped and copied. */

        /* Chunk one needs to be mapped and copied. */
        pte = (get_pte(0) & ~PG_FRAME);
        for(off = 0; off < MEM_CHUNK1_SIZE; ) {

                /*
                 * We use the PMEG immediately before the
                 * segment we're copying in the PROM virtual
                 * mapping of the chunk.  If this is the first
                 * segment, this is the PMEG the PROM used to
                 * map 0x2b8000 virtual to 0x2b8000 physical,
                 * which I'll assume is unused.  For the second
                 * and subsequent segments, this will be the
                 * PMEG used to map the previous segment, which
                 * is now (since we already copied it) unused.
                 */
                sme = get_segmap((MEM_CHUNK1_LOAD_VIRT_PROM + off) - NBSG);
                set_segmap(MEM_CHUNK1_COPY_VIRT + off, sme);

                /* Set the PTEs in this new PMEG. */
                for(off_end = off + NBSG; off < off_end; off += NBPG)
                        set_pte(MEM_CHUNK1_COPY_VIRT + off,
                            pte | PA_PGNUM(MEM_CHUNK1_COPY_PHYS + off));

                /* Copy this segment. */
                memcpy((void *)(MEM_CHUNK1_COPY_VIRT + (off - NBSG)),
                    (void *)(MEM_CHUNK1_LOAD_VIRT + (off - NBSG)),
                    NBSG);
        }

        /* Tell our caller where in virtual space to enter. */
        return ((char *)entry) - MEM_CHUNK0_LOAD_VIRT;
}
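/*
 * Worked example (illustrative only): if the kernel's entry point was
 * loaded at virtual 0x404000, i.e. MEM_CHUNK0_LOAD_VIRT + 0x4000, then
 * sun2_map_mem_run() returns 0x4000, the address that same code will
 * occupy once the kernel runs from physical address zero.
 */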
void
sun2_init(void)
{
        /* Set the function pointers. */
        dev_mapin_p   = dev2_mapin;
        dvma_alloc_p  = dvma2_alloc;
        dvma_free_p   = dvma2_free;
        dvma_mapin_p  = dvma2_mapin;
        dvma_mapout_p = dvma2_mapout;

        /* Prepare DVMA segment. */
        dvma2_init();
}