Switch remaining SPARC32 code over to OFMEM.
[openbios.git] / arch / sparc32 / lib.c
blob72d3a870892ab4e3942e16cce1681f905b1259e3
1 /* lib.c
2 * tag: simple function library
4 * Copyright (C) 2003 Stefan Reinauer
6 * See the file "COPYING" for further information about
7 * the copyright and warranty status of this work.
8 */
10 #include "libc/vsprintf.h"
11 #include "libopenbios/bindings.h"
12 #include "libopenbios/ofmem.h"
13 #include "asm/asi.h"
14 #include "pgtsrmmu.h"
15 #include "openprom.h"
16 #include "libopenbios/sys_info.h"
17 #include "boot.h"
18 #include "romvec.h"
/* Number of MMU contexts supported by the Swift (microSPARC-II) SRMMU. */
#define NCTX_SWIFT 0x100

/* Size of the low-memory 1:1 RAM mapping (32 MiB).  Parenthesized so the
 * macro stays a single value inside larger expressions (the unparenthesized
 * form mis-associates with operators such as `%` or a leading `-`). */
#define LOWMEMSZ (32 * 1024 * 1024)

#ifdef CONFIG_DEBUG_MEM
#define DPRINTF(fmt, args...) \
do { printk(fmt , ##args); } while (0)
#else
#define DPRINTF(fmt, args...)
#endif
/* Format a string and print it on the screen, just like the libc
 * function printf.  Output longer than the 512-byte scratch buffer is
 * truncated; the return value is the vsnprintf() result (the length
 * the full output would have had). */
int printk( const char *fmt, ... )
{
    char buf[512];
    const char *s;
    va_list ap;
    int len;

    va_start(ap, fmt);
    len = vsnprintf(buf, sizeof(buf), fmt, ap);
    va_end(ap);

    for (s = buf; *s != '\0'; s++)
        putchar(*s);

    return len;
}
/*
 * Allocatable memory chunk: a simple bump allocator over the half-open
 * range [start, uplim), with curp pointing at the next free byte.
 */
struct mem {
    char *start, *uplim;
    char *curp;
};

struct mem cdvmem;              /* Current device virtual memory space */
58 unsigned int va_shift;
59 static unsigned long *context_table;
60 static unsigned long *l1;
62 static phandle_t s_phandle_memory = 0;
63 static phandle_t s_phandle_mmu = 0;
64 static ucell *mem_reg = 0;
65 static ucell *mem_avail = 0;
66 static ucell *virt_avail = 0;
68 static struct linux_mlist_v0 totphys[1];
69 static struct linux_mlist_v0 totmap[1];
70 static struct linux_mlist_v0 totavail[1];
72 struct linux_mlist_v0 *ptphys;
73 struct linux_mlist_v0 *ptmap;
74 struct linux_mlist_v0 *ptavail;
76 /* Private functions for mapping between physical/virtual addresses */
77 phys_addr_t
78 va2pa(unsigned long va)
80 if ((va >= (unsigned long)&_start) &&
81 (va < (unsigned long)&_end))
82 return va - va_shift;
83 else
84 return va;
87 unsigned long
88 pa2va(phys_addr_t pa)
90 if ((pa + va_shift >= (unsigned long)&_start) &&
91 (pa + va_shift < (unsigned long)&_end))
92 return pa + va_shift;
93 else
94 return pa;
/* malloc(): forwarded to the OFMEM allocator. */
void *
malloc(int size)
{
    return ofmem_malloc(size);
}
/* realloc(): forwarded to the OFMEM allocator. */
void *
realloc( void *ptr, size_t size )
{
    return ofmem_realloc(ptr, size);
}
/* free(): forwarded to the OFMEM allocator. */
void
free(void *ptr)
{
    ofmem_free(ptr);
}
116 * Allocate memory. This is reusable.
118 void
119 mem_init(struct mem *t, char *begin, char *limit)
121 t->start = begin;
122 t->uplim = limit;
123 t->curp = begin;
126 void *
127 mem_alloc(struct mem *t, int size, int align)
129 char *p;
130 unsigned long pa;
132 // The alignment restrictions refer to physical, not virtual
133 // addresses
134 pa = va2pa((unsigned long)t->curp) + (align - 1);
135 pa &= ~(align - 1);
136 p = (char *)pa2va(pa);
138 if ((unsigned long)p >= (unsigned long)t->uplim ||
139 (unsigned long)p + size > (unsigned long)t->uplim)
140 return NULL;
141 t->curp = p + size;
143 return p;
146 static unsigned long
147 find_pte(unsigned long va, int alloc)
149 uint32_t pte;
150 void *p;
151 unsigned long pa;
152 int ret;
154 pte = l1[(va >> SRMMU_PGDIR_SHIFT) & (SRMMU_PTRS_PER_PGD - 1)];
155 if ((pte & SRMMU_ET_MASK) == SRMMU_ET_INVALID) {
156 if (alloc) {
157 ret = ofmem_posix_memalign(&p, SRMMU_PTRS_PER_PMD * sizeof(int),
158 SRMMU_PTRS_PER_PMD * sizeof(int));
159 if (ret != 0)
160 return ret;
161 pte = SRMMU_ET_PTD | ((va2pa((unsigned long)p)) >> 4);
162 l1[(va >> SRMMU_PGDIR_SHIFT) & (SRMMU_PTRS_PER_PGD - 1)] = pte;
163 /* barrier() */
164 } else {
165 return -1;
169 pa = (pte & 0xFFFFFFF0) << 4;
170 pa += ((va >> SRMMU_PMD_SHIFT) & (SRMMU_PTRS_PER_PMD - 1)) << 2;
171 pte = *(uint32_t *)pa2va(pa);
172 if ((pte & SRMMU_ET_MASK) == SRMMU_ET_INVALID) {
173 if (alloc) {
174 ret = ofmem_posix_memalign(&p, SRMMU_PTRS_PER_PTE * sizeof(void *),
175 SRMMU_PTRS_PER_PTE * sizeof(void *));
176 if (ret != 0)
177 return ret;
178 pte = SRMMU_ET_PTD | ((va2pa((unsigned int)p)) >> 4);
179 *(uint32_t *)pa2va(pa) = pte;
180 } else {
181 return -2;
185 pa = (pte & 0xFFFFFFF0) << 4;
186 pa += ((va >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE - 1)) << 2;
188 return pa2va(pa);
191 static void
192 map_pages(phys_addr_t phys, unsigned long virt,
193 unsigned long size, unsigned long mode)
195 unsigned long npages, off;
196 uint32_t pte;
197 unsigned long pa;
199 DPRINTF("map_pages: va 0x%lx, pa 0x%llx, size 0x%lx\n", virt, phys, size);
201 off = phys & (PAGE_SIZE - 1);
202 npages = (off + (size - 1) + (PAGE_SIZE - 1)) / PAGE_SIZE;
203 phys &= ~(uint64_t)(PAGE_SIZE - 1);
205 while (npages-- != 0) {
206 pa = find_pte(virt, 1);
208 pte = SRMMU_ET_PTE | ((phys & PAGE_MASK) >> 4);
209 pte |= mode;
211 *(uint32_t *)pa = pte;
213 virt += PAGE_SIZE;
214 phys += PAGE_SIZE;
/*
 * D5.3 pgmap@ ( va -- pte )
 *
 * Look up the raw PTE for va without allocating page tables.
 * Pushes the PTE, or 0 when the page tables for va do not exist.
 */
static void
pgmap_fetch(void)
{
    uint32_t pte;
    unsigned long va, pa;

    va = POP();

    pa = find_pte(va, 0);
    /* find_pte() reports a missing PGD/PMD entry as (unsigned long)-1
       or -2; the previous check compared against +1/+2 and therefore
       never caught the error before dereferencing pa. */
    if (pa == (unsigned long)-1 || pa == (unsigned long)-2)
        goto error;
    pte = *(uint32_t *)pa;
    DPRINTF("pgmap@: va 0x%lx pa 0x%lx pte 0x%x\n", va, pa, pte);

    PUSH(pte);
    return;
error:
    PUSH(0);
}
/*
 * D5.3 pgmap! ( pte va -- )
 *
 * Install a raw PTE for va, allocating any missing intermediate page
 * tables on the way down.
 */
static void
pgmap_store(void)
{
    uint32_t new_pte;
    unsigned long va, slot;

    va = POP();
    new_pte = POP();

    slot = find_pte(va, 1);
    *(uint32_t *)slot = new_pte;
    DPRINTF("pgmap!: va 0x%lx pa 0x%lx pte 0x%x\n", va, slot, new_pte);
}
/*
 * D5.3 map-pages ( pa space va size -- )
 *
 * Map `size` bytes of the 36-bit physical address space:pa at va,
 * using the default translation mode for that physical range.
 */
static void
ob_map_pages(void)
{
    unsigned long va;
    int size;
    uint64_t pa;

    size = POP();
    va = POP();
    pa = POP();                 /* space: high bits of the 36-bit address */
    pa <<= 32;
    pa |= POP() & 0xffffffff;   /* low 32 bits of the physical address */

    map_pages(pa, va, size, ofmem_arch_default_translation_mode(pa));
    DPRINTF("map-page: va 0x%lx pa 0x%llx size 0x%x\n", va, pa, size);
}
278 static void
279 update_memory_properties(void)
281 /* Update the device tree memory properties from the master
282 totphys, totmap and totavail romvec arrays */
283 mem_reg[0] = 0;
284 mem_reg[1] = pointer2cell(totphys[0].start_adr);
285 mem_reg[2] = totphys[0].num_bytes;
287 virt_avail[0] = 0;
288 virt_avail[1] = 0;
289 virt_avail[2] = pointer2cell(totmap[0].start_adr);
291 mem_avail[0] = 0;
292 mem_avail[1] = pointer2cell(totavail[0].start_adr);
293 mem_avail[2] = totavail[0].num_bytes;
296 static void
297 init_romvec_mem(void)
299 ptphys = totphys;
300 ptmap = totmap;
301 ptavail = totavail;
304 * Form memory descriptors.
306 totphys[0].theres_more = NULL;
307 totphys[0].start_adr = (char *) 0;
308 totphys[0].num_bytes = qemu_mem_size;
310 totavail[0].theres_more = NULL;
311 totavail[0].start_adr = (char *) 0;
312 totavail[0].num_bytes = va2pa((int)&_start) - PAGE_SIZE;
314 totmap[0].theres_more = NULL;
315 totmap[0].start_adr = &_start;
316 totmap[0].num_bytes = (unsigned long) &_iomem -
317 (unsigned long) &_start + PAGE_SIZE;
319 /* Pointers to device tree memory properties */
320 mem_reg = malloc(sizeof(ucell) * 3);
321 mem_avail = malloc(sizeof(ucell) * 3);
322 virt_avail = malloc(sizeof(ucell) * 3);
324 update_memory_properties();
/*
 * OBP mmap service: map `size` bytes of the 36-bit physical address
 * which_io:pa at va.  Returns va unchanged, as OBP clients expect.
 */
char *obp_dumb_mmap(char *va, int which_io, unsigned int pa,
                    unsigned int size)
{
    uint64_t mpa = ((uint64_t)which_io << 32) | (uint64_t)pa;

    map_pages(mpa, (unsigned long)va, size, ofmem_arch_default_translation_mode(mpa));
    return va;
}
/* OBP munmap service: deliberately a no-op — mappings are left in place. */
void obp_dumb_munmap(__attribute__((unused)) char *va,
                     __attribute__((unused)) unsigned int size)
{
    DPRINTF("obp_dumb_munmap: virta 0x%x, sz %d\n", (unsigned int)va, size);
}
342 void ofmem_arch_unmap_pages(ucell virt, ucell size)
344 /* Currently do nothing */
347 void ofmem_arch_early_map_pages(phys_addr_t phys, ucell virt, ucell size, ucell mode)
349 map_pages(phys, virt, size, mode);
352 char *obp_dumb_memalloc(char *va, unsigned int size)
354 phys_addr_t phys;
355 ucell virt;
357 /* Claim physical memory */
358 phys = ofmem_claim_phys(-1, size, CONFIG_OFMEM_MALLOC_ALIGN);
360 /* Claim virtual memory */
361 virt = ofmem_claim_virt(pointer2cell(va), size, 0);
363 /* Map the memory */
364 ofmem_map(phys, virt, size, ofmem_arch_default_translation_mode(phys));
366 return cell2pointer(virt);
/* OBP memfree service: deliberately a no-op — memory is never reclaimed. */
void obp_dumb_memfree(__attribute__((unused))char *va,
                      __attribute__((unused))unsigned sz)
{
    DPRINTF("obp_dumb_memfree 0x%p (size %d)\n", va, sz);
}
375 void
376 ob_init_mmu(void)
378 ucell *reg;
380 init_romvec_mem();
382 /* Find the phandles for the /memory and /virtual-memory nodes */
383 push_str("/memory");
384 fword("find-package");
385 POP();
386 s_phandle_memory = POP();
388 push_str("/virtual-memory");
389 fword("find-package");
390 POP();
391 s_phandle_mmu = POP();
393 ofmem_register(s_phandle_memory, s_phandle_mmu);
395 /* Setup /memory:reg (totphys) property */
396 reg = malloc(3 * sizeof(ucell));
397 ofmem_arch_encode_physaddr(reg, 0); /* physical base */
398 reg[2] = (ucell)ofmem_arch_get_phys_top(); /* size */
400 push_str("/memory");
401 fword("find-device");
402 PUSH(pointer2cell(reg));
403 PUSH(3 * sizeof(ucell));
404 push_str("reg");
405 PUSH_ph(s_phandle_memory);
406 fword("encode-property");
408 PUSH(0);
409 fword("active-package!");
410 bind_func("pgmap@", pgmap_fetch);
411 bind_func("pgmap!", pgmap_store);
412 bind_func("map-pages", ob_map_pages);
416 * Switch page tables.
418 void
419 init_mmu_swift(void)
421 unsigned int addr, i;
422 unsigned long pa, va;
423 int size;
425 ofmem_posix_memalign((void *)&context_table, NCTX_SWIFT * sizeof(int),
426 NCTX_SWIFT * sizeof(int));
427 ofmem_posix_memalign((void *)&l1, 256 * sizeof(int), 256 * sizeof(int));
429 context_table[0] = (((unsigned long)va2pa((unsigned long)l1)) >> 4) |
430 SRMMU_ET_PTD;
432 for (i = 1; i < NCTX_SWIFT; i++) {
433 context_table[i] = SRMMU_ET_INVALID;
435 for (i = 0; i < 256; i += 4) {
436 l1[i] = SRMMU_ET_INVALID;
439 // text, rodata, data, and bss mapped to end of RAM
440 va = (unsigned long)&_start;
441 size = (unsigned long)&_end - (unsigned long)&_start;
442 pa = va2pa(va);
443 map_pages(pa, va, size, ofmem_arch_default_translation_mode(pa));
445 // 1:1 mapping for RAM
446 map_pages(0, 0, LOWMEMSZ, ofmem_arch_default_translation_mode(0));
449 * Flush cache
451 for (addr = 0; addr < 0x2000; addr += 0x10) {
452 __asm__ __volatile__ ("sta %%g0, [%0] %1\n\t" : :
453 "r" (addr), "i" (ASI_M_DATAC_TAG));
454 __asm__ __volatile__ ("sta %%g0, [%0] %1\n\t" : :
455 "r" (addr<<1), "i" (ASI_M_TXTC_TAG));
457 srmmu_set_context(0);
458 srmmu_set_ctable_ptr(va2pa((unsigned long)context_table));
459 srmmu_flush_whole_tlb();