/* minix/kernel/arch/i386/pg_utils.c */

#include <minix/cpufeature.h>

#include <assert.h>
#include "kernel/kernel.h"
#include "arch_proto.h"

#include <string.h>

/* These are set/computed in kernel.lds. */
extern char _kern_vir_base, _kern_phys_base, _kern_size;

/* Retrieve the absolute values to something we can use. */
static phys_bytes kern_vir_start = (phys_bytes) &_kern_vir_base;
static phys_bytes kern_phys_start = (phys_bytes) &_kern_phys_base;
static phys_bytes kern_kernlen = (phys_bytes) &_kern_size;

/* page directory we can use to map things */
static u32_t pagedir[1024] __aligned(4096);
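
/* Print the current memory map, for debugging. */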
void print_memmap(kinfo_t *cbi)
{
        int m;
        assert(cbi->mmap_size < MAXMEMMAP);
        for(m = 0; m < cbi->mmap_size; m++) {
                phys_bytes addr = cbi->memmap[m].mm_base_addr,
                        endit = cbi->memmap[m].mm_base_addr +
                                cbi->memmap[m].mm_length;
                printf("%08lx-%08lx ", addr, endit);
        }
        printf("\nsize %08lx\n", cbi->mmap_size);
}
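
/* Remove the physical range [start, end) from the free memory map,
 * after widening it outward to page boundaries.  Any overlapping map
 * entry is replaced by the (up to two) leftover subranges.
 */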
void cut_memmap(kinfo_t *cbi, phys_bytes start, phys_bytes end)
{
        int m;
        phys_bytes o;

        if((o=start % I386_PAGE_SIZE))
                start -= o;
        if((o=end % I386_PAGE_SIZE))
                end += I386_PAGE_SIZE - o;

        assert(kernel_may_alloc);

        for(m = 0; m < cbi->mmap_size; m++) {
                phys_bytes substart = start, subend = end;
                phys_bytes memaddr = cbi->memmap[m].mm_base_addr,
                        memend = cbi->memmap[m].mm_base_addr +
                                cbi->memmap[m].mm_length;

                /* adjust cut range to be a subset of the free memory */
                if(substart < memaddr) substart = memaddr;
                if(subend > memend) subend = memend;
                if(substart >= subend) continue;

                /* if there is any overlap, forget this one and add
                 * 1-2 subranges back
                 */
                cbi->memmap[m].mm_base_addr = cbi->memmap[m].mm_length = 0;
                if(substart > memaddr)
                        add_memmap(cbi, memaddr, substart-memaddr);
                if(subend < memend)
                        add_memmap(cbi, subend, memend-subend);
        }
}

phys_bytes alloc_lowest(kinfo_t *cbi, phys_bytes len)
{
        /* Allocate the lowest physical page we have. */
        int m;
#define EMPTY 0xffffffff
        phys_bytes lowest = EMPTY;
        assert(len > 0);
        len = roundup(len, I386_PAGE_SIZE);

        assert(kernel_may_alloc);

        for(m = 0; m < cbi->mmap_size; m++) {
                if(cbi->memmap[m].mm_length < len) continue;
                if(cbi->memmap[m].mm_base_addr < lowest)
                        lowest = cbi->memmap[m].mm_base_addr;
        }
        assert(lowest != EMPTY);
        /* carve [lowest, lowest + len) out of the free memory map */
        cut_memmap(cbi, lowest, lowest + len);
        cbi->kernel_allocated_bytes_dynamic += len;
        return lowest;
}
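
/* Add the range [addr, addr+len) to the free memory map, clipped
 * below 4GB and trimmed to whole pages.  Also tracks the highest
 * physical address seen in mem_high_phys.
 */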
void add_memmap(kinfo_t *cbi, u64_t addr, u64_t len)
{
        int m;
#define LIMIT 0xFFFFF000
        /* Truncate available memory at 4GB as the rest of minix
         * currently can't deal with any bigger.
         */
        if(addr > LIMIT) return;
        if(addr + len > LIMIT) {
                len -= (addr + len - LIMIT);
        }
        assert(cbi->mmap_size < MAXMEMMAP);
        if(len == 0) return;
        addr = roundup(addr, I386_PAGE_SIZE);
        len = rounddown(len, I386_PAGE_SIZE);

        assert(kernel_may_alloc);

        for(m = 0; m < MAXMEMMAP; m++) {
                phys_bytes highmark;
                if(cbi->memmap[m].mm_length) continue;
                cbi->memmap[m].mm_base_addr = addr;
                cbi->memmap[m].mm_length = len;
                cbi->memmap[m].mm_type = MULTIBOOT_MEMORY_AVAILABLE;
                if(m >= cbi->mmap_size)
                        cbi->mmap_size = m+1;
                highmark = addr + len;
                if(highmark > cbi->mem_high_phys) {
                        cbi->mem_high_phys = highmark;
                }

                return;
        }

        panic("no available memmap slot");
}
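
/* Hand out one page table from a small static pool, returning its
 * virtual address and storing its physical address in *ph.
 */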
u32_t *alloc_pagetable(phys_bytes *ph)
{
        u32_t *ret;
#define PG_PAGETABLES 6
        static u32_t pagetables[PG_PAGETABLES][1024] __aligned(4096);
        static int pt_inuse = 0;
        if(pt_inuse >= PG_PAGETABLES) panic("no more pagetables");
        assert(sizeof(pagetables[pt_inuse]) == I386_PAGE_SIZE);
        ret = pagetables[pt_inuse++];
        *ph = vir2phys(ret);
        return ret;
}

#define PAGE_KB (I386_PAGE_SIZE / 1024)
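
/* Allocate one page of physical memory, taken from the tail of the
 * last nonempty region in the memory map.
 */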
phys_bytes pg_alloc_page(kinfo_t *cbi)
{
        int m;
        multiboot_memory_map_t *mmap;

        assert(kernel_may_alloc);

        for(m = cbi->mmap_size-1; m >= 0; m--) {
                mmap = &cbi->memmap[m];
                if(!mmap->mm_length) continue;
                assert(mmap->mm_length > 0);
                assert(!(mmap->mm_length % I386_PAGE_SIZE));
                assert(!(mmap->mm_base_addr % I386_PAGE_SIZE));

                mmap->mm_length -= I386_PAGE_SIZE;

                cbi->kernel_allocated_bytes_dynamic += I386_PAGE_SIZE;

                return mmap->mm_base_addr + mmap->mm_length;
        }

        panic("can't find free memory");
}
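
/* Build an identity mapping (virtual == physical) of the whole
 * address space in the boot page directory, using 4MB pages.
 */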
void pg_identity(kinfo_t *cbi)
{
        uint32_t i;
        phys_bytes phys;

        /* We map memory that does not correspond to physical memory
         * as non-cacheable. Make sure we know what it is.
         */
        assert(cbi->mem_high_phys);

        /* Set up an identity mapping page directory */
        for(i = 0; i < I386_VM_DIR_ENTRIES; i++) {
                u32_t flags = I386_VM_PRESENT | I386_VM_BIGPAGE
                        | I386_VM_USER
                        | I386_VM_WRITE;
                phys = i * I386_BIG_PAGE_SIZE;
                if((cbi->mem_high_phys & I386_VM_ADDR_MASK_4MB)
                        <= (phys & I386_VM_ADDR_MASK_4MB)) {
                        flags |= I386_VM_PWT | I386_VM_PCD;
                }
                pagedir[i] = phys | flags;
        }
}
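
/* Map the kernel at its linked virtual address using 4MB pages, and
 * return the first page directory entry left unused.
 */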
int pg_mapkernel(void)
{
        int pde;
        u32_t mapped = 0, kern_phys = kern_phys_start;

        assert(!(kern_vir_start % I386_BIG_PAGE_SIZE));
        assert(!(kern_phys % I386_BIG_PAGE_SIZE));
        pde = kern_vir_start / I386_BIG_PAGE_SIZE; /* start pde */
        while(mapped < kern_kernlen) {
                pagedir[pde] = kern_phys | I386_VM_PRESENT |
                        I386_VM_BIGPAGE | I386_VM_WRITE;
                mapped += I386_BIG_PAGE_SIZE;
                kern_phys += I386_BIG_PAGE_SIZE;
                pde++;
        }
        return pde; /* free pde */
}
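
/* Turn on paging.  Order matters: CR4.PSE (4MB pages) must be set
 * before CR0.PG, since the page directory contains 4MB entries, and
 * CR4.PGE (global pages) may only be enabled after paging is on.
 */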
void vm_enable_paging(void)
{
        u32_t cr0, cr4;
        int pgeok;

        pgeok = _cpufeature(_CPUF_I386_PGE);

#ifdef PAE
        if(_cpufeature(_CPUF_I386_PAE) == 0)
                panic("kernel built with PAE support, CPU seems to lack PAE support?\n");
#endif

        cr0 = read_cr0();
        cr4 = read_cr4();

        /* The boot loader should have put us in protected mode. */
        assert(cr0 & I386_CR0_PE);

        /* First clear PG and PGE flag, as PGE must be enabled after PG. */
        write_cr0(cr0 & ~I386_CR0_PG);
        write_cr4(cr4 & ~(I386_CR4_PGE | I386_CR4_PSE));

        cr0 = read_cr0();
        cr4 = read_cr4();

        /* Our page table contains 4MB entries. */
        cr4 |= I386_CR4_PSE;

        write_cr4(cr4);

        /* First enable paging, then enable global page flag. */
        cr0 |= I386_CR0_PG;
        write_cr0(cr0);
        cr0 |= I386_CR0_WP;
        write_cr0(cr0);

        /* May we enable these features? */
        if(pgeok)
                cr4 |= I386_CR4_PGE;

        write_cr4(cr4);
}
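
/* Load the boot page directory into CR3 and return its physical address. */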
phys_bytes pg_load(void)
{
        phys_bytes phpagedir = vir2phys(pagedir);
        write_cr3(phpagedir);
        return phpagedir;
}
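
/* Empty the boot page directory. */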
void pg_clear(void)
{
        memset(pagedir, 0, sizeof(pagedir));
}
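
/* Round b down to the nearest page boundary. */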
phys_bytes pg_rounddown(phys_bytes b)
{
        phys_bytes o;
        if(!(o = b % I386_PAGE_SIZE))
                return b;
        return b - o;
}
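
/* Map the physical range starting at phys (or freshly allocated pages,
 * if phys == PG_ALLOCATEME) at virtual addresses [vaddr, vaddr_end),
 * taking 4KB page tables from the static pool as needed.
 */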
void pg_map(phys_bytes phys, vir_bytes vaddr, vir_bytes vaddr_end,
        kinfo_t *cbi)
{
        static int mapped_pde = -1;
        static u32_t *pt = NULL;
        int pde, pte;

        assert(kernel_may_alloc);

        if(phys == PG_ALLOCATEME) {
                assert(!(vaddr % I386_PAGE_SIZE));
        } else {
                assert((vaddr % I386_PAGE_SIZE) == (phys % I386_PAGE_SIZE));
                vaddr = pg_rounddown(vaddr);
                phys = pg_rounddown(phys);
        }
        assert(vaddr < kern_vir_start);

        while(vaddr < vaddr_end) {
                phys_bytes source = phys;
                assert(!(vaddr % I386_PAGE_SIZE));
                if(phys == PG_ALLOCATEME) {
                        source = pg_alloc_page(cbi);
                } else {
                        assert(!(phys % I386_PAGE_SIZE));
                }
                assert(!(source % I386_PAGE_SIZE));
                pde = I386_VM_PDE(vaddr);
                pte = I386_VM_PTE(vaddr);
                if(mapped_pde < pde) {
                        phys_bytes ph;
                        pt = alloc_pagetable(&ph);
                        pagedir[pde] = (ph & I386_VM_ADDR_MASK)
                                | I386_VM_PRESENT | I386_VM_USER | I386_VM_WRITE;
                        mapped_pde = pde;
                }
                assert(pt);
                pt[pte] = (source & I386_VM_ADDR_MASK) |
                        I386_VM_PRESENT | I386_VM_USER | I386_VM_WRITE;
                vaddr += I386_PAGE_SIZE;
                if(phys != PG_ALLOCATEME)
                        phys += I386_PAGE_SIZE;
        }
}
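
/* Report the boot page directory's physical and virtual addresses. */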
void pg_info(reg_t *pagedir_ph, u32_t **pagedir_v)
{
        *pagedir_ph = vir2phys(pagedir);
        *pagedir_v = pagedir;
}
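
/* A plausible boot-time sequence for these primitives (a sketch only;
 * the actual call sites live in the arch startup code, not this file):
 *
 *      pg_clear();            start from an empty page directory
 *      pg_identity(&kinfo);   identity-map the address space, 4MB pages
 *      pg_mapkernel();        map the kernel at its linked address
 *      pg_load();             point CR3 at the boot page directory
 *      vm_enable_paging();    set CR4.PSE, CR0.PG/WP, then CR4.PGE
 */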