/* minix/kernel/arch/earm/pg_utils.c */
#include <minix/cpufeature.h>

#include <minix/type.h>
#include <assert.h>
#include "kernel/kernel.h"
#include "arch_proto.h"
#include <machine/cpu.h>
#include <arm/armreg.h>

#include <string.h>

/* These are set/computed in kernel.lds. */
extern char _kern_vir_base, _kern_phys_base, _kern_size;

/* Retrieve the absolute values to something we can use. */
static phys_bytes kern_vir_start = (phys_bytes) &_kern_vir_base;
static phys_bytes kern_phys_start = (phys_bytes) &_kern_phys_base;
static phys_bytes kern_kernlen = (phys_bytes) &_kern_size;

/* page directory we can use to map things */
static u32_t pagedir[4096] __aligned(16384);

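/* Print each region of the boot memory map as a start-end pair,
 * followed by the map size.
 */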
void print_memmap(kinfo_t *cbi)
{
	int m;
	assert(cbi->mmap_size < MAXMEMMAP);
	for(m = 0; m < cbi->mmap_size; m++) {
		phys_bytes addr = cbi->memmap[m].mm_base_addr,
			endit = cbi->memmap[m].mm_base_addr + cbi->memmap[m].mm_length;
		printf("%08lx-%08lx ", addr, endit);
	}
	printf("\nsize %08lx\n", cbi->mmap_size);
}

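/* Remove the (page-rounded) range [start, end) from the free memory map;
 * regions that partially overlap the cut are replaced by their remaining
 * subranges.
 */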
void cut_memmap(kinfo_t *cbi, phys_bytes start, phys_bytes end)
{
	int m;
	phys_bytes o;

	if((o = start % ARM_PAGE_SIZE))
		start -= o;
	if((o = end % ARM_PAGE_SIZE))
		end += ARM_PAGE_SIZE - o;

	assert(kernel_may_alloc);

	for(m = 0; m < cbi->mmap_size; m++) {
		phys_bytes substart = start, subend = end;
		phys_bytes memaddr = cbi->memmap[m].mm_base_addr,
			memend = cbi->memmap[m].mm_base_addr + cbi->memmap[m].mm_length;

		/* adjust cut range to be a subset of the free memory */
		if(substart < memaddr) substart = memaddr;
		if(subend > memend) subend = memend;
		if(substart >= subend) continue;

		/* if there is any overlap, forget this one and add
		 * 1-2 subranges back
		 */
		cbi->memmap[m].mm_base_addr = cbi->memmap[m].mm_length = 0;
		if(substart > memaddr)
			add_memmap(cbi, memaddr, substart - memaddr);
		if(subend < memend)
			add_memmap(cbi, subend, memend - subend);
	}
}

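/* Add a range of available physical memory to the boot memory map: clip it
 * to below 4GB, page-align it inward, store it in the first free slot and
 * raise mem_high_phys if the range extends past it.
 */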
void add_memmap(kinfo_t *cbi, u64_t addr, u64_t len)
{
	int m;
#define LIMIT 0xFFFFF000
	/* Truncate available memory at 4GB as the rest of minix
	 * currently can't deal with anything bigger.
	 */
	if(addr > LIMIT) {
		return;
	}
	if(addr + len > LIMIT) {
		len -= (addr + len - LIMIT);
	}
	assert(cbi->mmap_size < MAXMEMMAP);
	if(len == 0) {
		return;
	}
	addr = roundup(addr, ARM_PAGE_SIZE);
	len = rounddown(len, ARM_PAGE_SIZE);

	assert(kernel_may_alloc);

	for(m = 0; m < MAXMEMMAP; m++) {
		phys_bytes highmark;
		if(cbi->memmap[m].mm_length) {
			continue;
		}
		cbi->memmap[m].mm_base_addr = addr;
		cbi->memmap[m].mm_length = len;
		cbi->memmap[m].type = MULTIBOOT_MEMORY_AVAILABLE;
		if(m >= cbi->mmap_size) {
			cbi->mmap_size = m + 1;
		}
		highmark = addr + len;
		if(highmark > cbi->mem_high_phys) {
			cbi->mem_high_phys = highmark;
		}

		return;
	}

	panic("no available memmap slot");
}

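/* Hand out one second-level page table from a small static pool; the
 * table's virtual address is returned and its physical address stored
 * in *ph.
 */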
u32_t *alloc_pagetable(phys_bytes *ph)
{
	u32_t *ret;
#define PG_PAGETABLES 24
	static u32_t pagetables[PG_PAGETABLES][256] __aligned(1024);
	static int pt_inuse = 0;
	if(pt_inuse >= PG_PAGETABLES) {
		panic("no more pagetables");
	}
	assert(sizeof(pagetables[pt_inuse]) == 1024);
	ret = pagetables[pt_inuse++];
	*ph = vir2phys(ret);
	return ret;
}

#define PAGE_KB (ARM_PAGE_SIZE / 1024)

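/* Allocate a single physical page by taking it off the start of the first
 * non-empty region in the boot memory map.
 */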
phys_bytes pg_alloc_page(kinfo_t *cbi)
{
	int m;
	multiboot_memory_map_t *mmap;

	assert(kernel_may_alloc);

	for(m = 0; m < cbi->mmap_size; m++) {
		mmap = &cbi->memmap[m];
		if(!mmap->mm_length) {
			continue;
		}
		assert(mmap->mm_length > 0);
		assert(!(mmap->mm_length % ARM_PAGE_SIZE));
		assert(!(mmap->mm_base_addr % ARM_PAGE_SIZE));

		u32_t addr = mmap->mm_base_addr;
		mmap->mm_base_addr += ARM_PAGE_SIZE;
		mmap->mm_length -= ARM_PAGE_SIZE;

		cbi->kernel_allocated_bytes_dynamic += ARM_PAGE_SIZE;

		return addr;
	}

	panic("can't find free memory");
}

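/* Fill the boot page directory with section entries that identity-map the
 * whole address space: RAM is mapped cacheable, everything else as device
 * memory.
 */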
void pg_identity(kinfo_t *cbi)
{
	uint32_t i;
	phys_bytes phys;

	/* We map memory that does not correspond to physical memory
	 * as non-cacheable. Make sure we know what it is.
	 */
	assert(cbi->mem_high_phys);

	/* Set up an identity mapping page directory */
	for(i = 0; i < ARM_VM_DIR_ENTRIES; i++) {
		u32_t flags = ARM_VM_SECTION
			| ARM_VM_SECTION_USER
			| ARM_VM_SECTION_DOMAIN;

		phys = i * ARM_SECTION_SIZE;
		/* mark normal memory as cacheable. TODO: fix hard-coded values */
		if(phys >= PHYS_MEM_BEGIN && phys <= PHYS_MEM_END) {
			pagedir[i] = phys | flags | ARM_VM_SECTION_CACHED;
		} else {
			pagedir[i] = phys | flags | ARM_VM_SECTION_DEVICE;
		}
	}
}

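/* Map the kernel image at its virtual base address using section entries;
 * returns the first page directory entry left free after the kernel.
 */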
int pg_mapkernel(void)
{
	int pde;
	u32_t mapped = 0, kern_phys = kern_phys_start;

	assert(!(kern_vir_start % ARM_SECTION_SIZE));
	assert(!(kern_phys_start % ARM_SECTION_SIZE));
	pde = kern_vir_start / ARM_SECTION_SIZE; /* start pde */
	while(mapped < kern_kernlen) {
		pagedir[pde] = (kern_phys & ARM_VM_SECTION_MASK)
			| ARM_VM_SECTION
			| ARM_VM_SECTION_SUPER
			| ARM_VM_SECTION_DOMAIN
			| ARM_VM_SECTION_CACHED;
		mapped += ARM_SECTION_SIZE;
		kern_phys += ARM_SECTION_SIZE;
		pde++;
	}

	return pde;	/* free pde */
}

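/* Program SCTLR (and, for the Cortex-A8, ACTLR) to switch on the MMU,
 * caches, branch prediction and barriers.
 */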
void vm_enable_paging(void)
{
	u32_t sctlr;
	u32_t actlr;

	write_ttbcr(0);

	/* Set all Domains to Client */
	write_dacr(0x55555555);

	sctlr = read_sctlr();

	/* Enable MMU */
	sctlr |= CPU_CONTROL_MMU_ENABLE;

	/* TRE set to zero (default reset value): TEX[2:0] are used, plus C and B bits. */
	sctlr &= ~CPU_CONTROL_TR_ENABLE;

	/* AFE set to zero (default reset value): not using simplified model. */
	sctlr &= ~CPU_CONTROL_AF_ENABLE;

	/* Enable instruction cache, data cache and branch prediction */
	sctlr |= CPU_CONTROL_DC_ENABLE;
	sctlr |= CPU_CONTROL_IC_ENABLE;
	sctlr |= CPU_CONTROL_BPRD_ENABLE;

	/* Enable barriers */
	sctlr |= CPU_CONTROL_32BD_ENABLE;

	/* Enable L2 cache (Cortex-A8) */
#define CORTEX_A8_L2EN (0x02)
	actlr = read_actlr();
	actlr |= CORTEX_A8_L2EN;
	write_actlr(actlr);

	write_sctlr(sctlr);
}

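/* Install the boot page directory in TTBR0 and return its physical address. */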
phys_bytes pg_load(void)
{
	phys_bytes phpagedir = vir2phys(pagedir);
	write_ttbr0(phpagedir);
	return phpagedir;
}

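/* Zero the boot page directory. */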
void pg_clear(void)
{
	memset(pagedir, 0, sizeof(pagedir));
}

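/* Round a physical address down to an ARM page boundary. */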
phys_bytes pg_rounddown(phys_bytes b)
{
	phys_bytes o;
	if(!(o = b % ARM_PAGE_SIZE)) {
		return b;
	}
	return b - o;
}

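/* Map the virtual range [vaddr, vaddr_end) onto physical memory starting at
 * phys; if phys is PG_ALLOCATEME, each page is taken from the boot memory
 * map instead. Second-level page tables are allocated on demand.
 */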
void pg_map(phys_bytes phys, vir_bytes vaddr, vir_bytes vaddr_end,
	kinfo_t *cbi)
{
	static int mapped_pde = -1;
	static u32_t *pt = NULL;
	int pde, pte;

	assert(kernel_may_alloc);

	if(phys == PG_ALLOCATEME) {
		assert(!(vaddr % ARM_PAGE_SIZE));
	} else {
		assert((vaddr % ARM_PAGE_SIZE) == (phys % ARM_PAGE_SIZE));
		vaddr = pg_rounddown(vaddr);
		phys = pg_rounddown(phys);
	}
	assert(vaddr < kern_vir_start);

	while(vaddr < vaddr_end) {
		phys_bytes source = phys;
		assert(!(vaddr % ARM_PAGE_SIZE));
		if(phys == PG_ALLOCATEME) {
			source = pg_alloc_page(cbi);
		} else {
			assert(!(phys % ARM_PAGE_SIZE));
		}
		assert(!(source % ARM_PAGE_SIZE));
		pde = ARM_VM_PDE(vaddr);
		pte = ARM_VM_PTE(vaddr);
		if(mapped_pde < pde) {
			phys_bytes ph;
			pt = alloc_pagetable(&ph);
			pagedir[pde] = (ph & ARM_VM_PDE_MASK)
				| ARM_VM_PAGEDIR
				| ARM_VM_PDE_DOMAIN;
			mapped_pde = pde;
		}
		assert(pt);
		pt[pte] = (source & ARM_VM_PTE_MASK)
			| ARM_VM_PAGETABLE
			| ARM_VM_PTE_CACHED
			| ARM_VM_PTE_USER;
		vaddr += ARM_PAGE_SIZE;
		if(phys != PG_ALLOCATEME) {
			phys += ARM_PAGE_SIZE;
		}
	}
}

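/* Report the physical and virtual addresses of the boot page directory. */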
void pg_info(reg_t *pagedir_ph, u32_t **pagedir_v)
{
	*pagedir_ph = vir2phys(pagedir);
	*pagedir_v = pagedir;
}