/* kernel/arch/i386/pg_utils.c */

#include <minix/cpufeature.h>

#include <assert.h>
#include "kernel.h"
#include <libexec.h>
#include "arch_proto.h"

#include <string.h>
#include <libexec.h>

/* These are set/computed in kernel.lds. */
extern char _kern_vir_base, _kern_phys_base, _kern_size;

/* Convert the linker symbols to absolute values we can use. */
static phys_bytes kern_vir_start = (phys_bytes) &_kern_vir_base;
static phys_bytes kern_phys_start = (phys_bytes) &_kern_phys_base;
static phys_bytes kern_kernlen = (phys_bytes) &_kern_size;

/* page directory we can use to map things */
static u32_t pagedir[1024] __aligned(4096);

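/* Print the memory map: one addr-end range per entry, then the entry count. */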
void print_memmap(kinfo_t *cbi)
{
	int m;
	assert(cbi->mmap_size < MAXMEMMAP);
	for(m = 0; m < cbi->mmap_size; m++) {
		phys_bytes addr = cbi->memmap[m].addr,
			endit = cbi->memmap[m].addr + cbi->memmap[m].len;
		printf("%08lx-%08lx ", addr, endit);
	}
	printf("\nsize %08lx\n", cbi->mmap_size);
}

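/* Remove [start, end) from the free memory map. The range is rounded out
 * to page boundaries; any leftover head or tail of an overlapping entry is
 * re-added as a new free range.
 */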
void cut_memmap(kinfo_t *cbi, phys_bytes start, phys_bytes end)
{
	int m;
	phys_bytes o;

	if((o=start % I386_PAGE_SIZE))
		start -= o;
	if((o=end % I386_PAGE_SIZE))
		end += I386_PAGE_SIZE - o;

	assert(kernel_may_alloc);

	for(m = 0; m < cbi->mmap_size; m++) {
		phys_bytes substart = start, subend = end;
		phys_bytes memaddr = cbi->memmap[m].addr,
			memend = cbi->memmap[m].addr + cbi->memmap[m].len;

		/* adjust cut range to be a subset of the free memory */
		if(substart < memaddr) substart = memaddr;
		if(subend > memend) subend = memend;
		if(substart >= subend) continue;

		/* if there is any overlap, forget this one and add
		 * 1-2 subranges back
		 */
		cbi->memmap[m].addr = cbi->memmap[m].len = 0;
		if(substart > memaddr)
			add_memmap(cbi, memaddr, substart-memaddr);
		if(subend < memend)
			add_memmap(cbi, subend, memend-subend);
	}
}

phys_bytes alloc_lowest(kinfo_t *cbi, phys_bytes len)
{
	/* Allocate len bytes (page-rounded) at the lowest available
	 * physical address.
	 */
	int m;
#define EMPTY 0xffffffff
	phys_bytes lowest = EMPTY;
	assert(len > 0);
	len = roundup(len, I386_PAGE_SIZE);

	assert(kernel_may_alloc);

	for(m = 0; m < cbi->mmap_size; m++) {
		if(cbi->memmap[m].len < len) continue;
		if(cbi->memmap[m].addr < lowest) lowest = cbi->memmap[m].addr;
	}
	assert(lowest != EMPTY);
	cut_memmap(cbi, lowest, len);
	return lowest;
}

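/* Record a range of available memory in the first free memmap slot,
 * page-aligning it, truncating it below 4GB and updating mem_high_phys.
 */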
void add_memmap(kinfo_t *cbi, u64_t addr, u64_t len)
{
	int m;
#define LIMIT 0xFFFFF000
	/* Truncate available memory at 4GB as the rest of minix
	 * currently can't deal with any bigger.
	 */
	if(addr > LIMIT) return;
	if(addr + len > LIMIT) {
		len -= (addr + len - LIMIT);
	}
	assert(cbi->mmap_size < MAXMEMMAP);
	if(len == 0) return;
	addr = roundup(addr, I386_PAGE_SIZE);
	len = rounddown(len, I386_PAGE_SIZE);

	assert(kernel_may_alloc);

	for(m = 0; m < MAXMEMMAP; m++) {
		phys_bytes highmark;
		if(cbi->memmap[m].len) continue;
		cbi->memmap[m].addr = addr;
		cbi->memmap[m].len = len;
		cbi->memmap[m].type = MULTIBOOT_MEMORY_AVAILABLE;
		if(m >= cbi->mmap_size)
			cbi->mmap_size = m+1;
		highmark = addr + len;
		if(highmark > cbi->mem_high_phys) {
			cbi->mem_high_phys = highmark;
		}

		return;
	}

	panic("no available memmap slot");
}

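/* Hand out one page table from a small static pool; returns its virtual
 * address and stores its physical address in *ph.
 */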
u32_t *alloc_pagetable(phys_bytes *ph)
{
	u32_t *ret;
#define PG_PAGETABLES 6
	static u32_t pagetables[PG_PAGETABLES][1024] __aligned(4096);
	static int pt_inuse = 0;
	if(pt_inuse >= PG_PAGETABLES) panic("no more pagetables");
	assert(sizeof(pagetables[pt_inuse]) == I386_PAGE_SIZE);
	ret = pagetables[pt_inuse++];
	*ph = vir2phys(ret);
	return ret;
}

#define PAGE_KB (I386_PAGE_SIZE / 1024)

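/* Allocate one physical page by shrinking the last non-empty memmap entry
 * and returning the page taken off its end.
 */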
phys_bytes pg_alloc_page(kinfo_t *cbi)
{
	int m;
	multiboot_memory_map_t *mmap;

	assert(kernel_may_alloc);

	for(m = cbi->mmap_size-1; m >= 0; m--) {
		mmap = &cbi->memmap[m];
		if(!mmap->len) continue;
		assert(mmap->len > 0);
		assert(!(mmap->len % I386_PAGE_SIZE));
		assert(!(mmap->addr % I386_PAGE_SIZE));

		mmap->len -= I386_PAGE_SIZE;

		return mmap->addr + mmap->len;
	}

	panic("can't find free memory");
}

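/* Fill the page directory with an identity (1:1) mapping of the whole
 * 4GB address space using 4MB pages; entries at or above the top of
 * physical memory are mapped non-cacheable.
 */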
void pg_identity(kinfo_t *cbi)
{
	int i;
	phys_bytes phys;

	/* We map memory that does not correspond to physical memory
	 * as non-cacheable. Make sure we know what it is.
	 */
	assert(cbi->mem_high_phys);

	/* Set up an identity mapping page directory */
	for(i = 0; i < I386_VM_DIR_ENTRIES; i++) {
		u32_t flags = I386_VM_PRESENT | I386_VM_BIGPAGE |
			I386_VM_USER | I386_VM_WRITE;
		phys = i * I386_BIG_PAGE_SIZE;
		if((cbi->mem_high_phys & I386_VM_ADDR_MASK_4MB)
			<= (phys & I386_VM_ADDR_MASK_4MB)) {
			flags |= I386_VM_PWT | I386_VM_PCD;
		}
		pagedir[i] = phys | flags;
	}
}

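/* Map the kernel image at its virtual base using 4MB pages and return
 * the first page directory entry that is still free.
 */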
int pg_mapkernel(void)
{
	int pde;
	u32_t mapped = 0, kern_phys = kern_phys_start;

	assert(!(kern_vir_start % I386_BIG_PAGE_SIZE));
	assert(!(kern_phys % I386_BIG_PAGE_SIZE));
	pde = kern_vir_start / I386_BIG_PAGE_SIZE; /* start pde */
	while(mapped < kern_kernlen) {
		pagedir[pde] = kern_phys | I386_VM_PRESENT |
			I386_VM_BIGPAGE | I386_VM_WRITE;
		mapped += I386_BIG_PAGE_SIZE;
		kern_phys += I386_BIG_PAGE_SIZE;
		pde++;
	}
	return pde;	/* free pde */
}

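/* Enable paging in the required order: 4MB page support (PSE) first,
 * then paging itself (PG, plus write protection), and finally global
 * pages (PGE) if the CPU supports them.
 */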
void vm_enable_paging(void)
{
	u32_t cr0, cr4;
	int pgeok;

	pgeok = _cpufeature(_CPUF_I386_PGE);

	cr0 = read_cr0();
	cr4 = read_cr4();

	/* The boot loader should have put us in protected mode. */
	assert(cr0 & I386_CR0_PE);

	/* First clear the PG and PGE flags, as PGE must be enabled after PG. */
	write_cr0(cr0 & ~I386_CR0_PG);
	write_cr4(cr4 & ~(I386_CR4_PGE | I386_CR4_PSE));

	cr0 = read_cr0();
	cr4 = read_cr4();

	/* Our page table contains 4MB entries. */
	cr4 |= I386_CR4_PSE;

	write_cr4(cr4);

	/* First enable paging, then enable the global page flag. */
	cr0 |= I386_CR0_PG;
	write_cr0(cr0);
	cr0 |= I386_CR0_WP;
	write_cr0(cr0);

	/* May we enable these features? */
	if(pgeok)
		cr4 |= I386_CR4_PGE;

	write_cr4(cr4);
}

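/* Load our page directory into CR3 and return its physical address. */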
phys_bytes pg_load()
{
	phys_bytes phpagedir = vir2phys(pagedir);
	write_cr3(phpagedir);
	return phpagedir;
}

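/* Reset the page directory to all-empty entries. */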
void pg_clear(void)
{
	memset(pagedir, 0, sizeof(pagedir));
}

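/* Round an address down to the start of its page. */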
phys_bytes pg_rounddown(phys_bytes b)
{
	phys_bytes o;
	if(!(o = b % I386_PAGE_SIZE))
		return b;
	return b - o;
}

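/* Map the virtual range [vaddr, vaddr_end) to physical memory starting at
 * phys, or to freshly allocated pages when phys is PG_ALLOCATEME. New page
 * tables come from alloc_pagetable() as higher page directory slots are
 * needed.
 */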
void pg_map(phys_bytes phys, vir_bytes vaddr, vir_bytes vaddr_end,
	kinfo_t *cbi)
{
	static int mapped_pde = -1;
	static u32_t *pt = NULL;
	int pde, pte;

	assert(kernel_may_alloc);

	if(phys == PG_ALLOCATEME) {
		assert(!(vaddr % I386_PAGE_SIZE));
	} else {
		assert((vaddr % I386_PAGE_SIZE) == (phys % I386_PAGE_SIZE));
		vaddr = pg_rounddown(vaddr);
		phys = pg_rounddown(phys);
	}
	assert(vaddr < kern_vir_start);

	while(vaddr < vaddr_end) {
		phys_bytes source = phys;
		assert(!(vaddr % I386_PAGE_SIZE));
		if(phys == PG_ALLOCATEME) {
			source = pg_alloc_page(cbi);
		} else {
			assert(!(phys % I386_PAGE_SIZE));
		}
		assert(!(source % I386_PAGE_SIZE));
		pde = I386_VM_PDE(vaddr);
		pte = I386_VM_PTE(vaddr);
		if(mapped_pde < pde) {
			phys_bytes ph;
			pt = alloc_pagetable(&ph);
			pagedir[pde] = (ph & I386_VM_ADDR_MASK)
				| I386_VM_PRESENT | I386_VM_USER | I386_VM_WRITE;
			mapped_pde = pde;
		}
		assert(pt);
		pt[pte] = (source & I386_VM_ADDR_MASK) |
			I386_VM_PRESENT | I386_VM_USER | I386_VM_WRITE;
		vaddr += I386_PAGE_SIZE;
		if(phys != PG_ALLOCATEME)
			phys += I386_PAGE_SIZE;
	}
}

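/* Report the page directory's physical and virtual addresses to the caller. */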
void pg_info(reg_t *pagedir_ph, u32_t **pagedir_v)
{
	*pagedir_ph = vir2phys(pagedir);
	*pagedir_v = pagedir;
}