#include <minix/cpufeature.h>

#include <minix/type.h>
#include <assert.h>
#include <string.h>

#include "kernel/kernel.h"
#include "arch_proto.h"

#include <machine/cpu.h>
#include <arm/armreg.h>

/* These are set/computed in kernel.lds. */
extern char _kern_vir_base, _kern_phys_base, _kern_size;

/* Retrieve the absolute values to something we can use. */
static phys_bytes kern_vir_start = (phys_bytes) &_kern_vir_base;
static phys_bytes kern_phys_start = (phys_bytes) &_kern_phys_base;
static phys_bytes kern_kernlen = (phys_bytes) &_kern_size;

/* page directory we can use to map things */
static u32_t pagedir[4096] __aligned(16384);

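/* Print the boot-time memory map, one base-end range per entry. */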
void print_memmap(kinfo_t *cbi)
{
        int m;

        assert(cbi->mmap_size < MAXMEMMAP);
        for(m = 0; m < cbi->mmap_size; m++) {
                phys_bytes addr = cbi->memmap[m].mm_base_addr,
                        endit = cbi->memmap[m].mm_base_addr + cbi->memmap[m].mm_length;
                printf("%08lx-%08lx ", addr, endit);
        }
        printf("\nsize %08lx\n", cbi->mmap_size);
}

void cut_memmap(kinfo_t *cbi, phys_bytes start, phys_bytes end)
{
        int m;
        phys_bytes o;

        if((o = start % ARM_PAGE_SIZE))
                start -= o;
        if((o = end % ARM_PAGE_SIZE))
                end += ARM_PAGE_SIZE - o;

        assert(kernel_may_alloc);

        for(m = 0; m < cbi->mmap_size; m++) {
                phys_bytes substart = start, subend = end;
                phys_bytes memaddr = cbi->memmap[m].mm_base_addr,
                        memend = cbi->memmap[m].mm_base_addr + cbi->memmap[m].mm_length;

                /* adjust cut range to be a subset of the free memory */
                if(substart < memaddr) substart = memaddr;
                if(subend > memend) subend = memend;
                if(substart >= subend) continue;

                /* if there is any overlap, forget this one and add
                 * back whatever remains on either side of the cut.
                 */
                cbi->memmap[m].mm_base_addr = cbi->memmap[m].mm_length = 0;
                if(substart > memaddr)
                        add_memmap(cbi, memaddr, substart - memaddr);
                if(subend < memend)
                        add_memmap(cbi, subend, memend - subend);
        }
}

void add_memmap(kinfo_t *cbi, u64_t addr, u64_t len)
{
        int m;
#define LIMIT 0xFFFFF000
        /* Truncate available memory at 4GB as the rest of minix
         * currently can't deal with any bigger.
         */
        if(addr > LIMIT) return;
        if(addr + len > LIMIT) {
                len -= (addr + len - LIMIT);
        }

        assert(cbi->mmap_size < MAXMEMMAP);

        if(len == 0) return;
        addr = roundup(addr, ARM_PAGE_SIZE);
        len = rounddown(len, ARM_PAGE_SIZE);

        assert(kernel_may_alloc);

        for(m = 0; m < MAXMEMMAP; m++) {
                phys_bytes highmark;
                if(cbi->memmap[m].mm_length) {
                        continue;
                }
                cbi->memmap[m].mm_base_addr = addr;
                cbi->memmap[m].mm_length = len;
                cbi->memmap[m].type = MULTIBOOT_MEMORY_AVAILABLE;
                if(m >= cbi->mmap_size) {
                        cbi->mmap_size = m+1;
                }
                highmark = addr + len;
                if(highmark > cbi->mem_high_phys) {
                        cbi->mem_high_phys = highmark;
                }

                return;
        }

        panic("no available memmap slot");
}

u32_t *alloc_pagetable(phys_bytes *ph)
{
        u32_t *ret;
#define PG_PAGETABLES 24
        static u32_t pagetables[PG_PAGETABLES][256] __aligned(1024);
        static int pt_inuse = 0;
        if(pt_inuse >= PG_PAGETABLES) {
                panic("no more pagetables");
        }
        assert(sizeof(pagetables[pt_inuse]) == 1024);
        ret = pagetables[pt_inuse++];
        *ph = vir2phys(ret);
        return ret;
}

#define PAGE_KB (ARM_PAGE_SIZE / 1024)

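/* Take one page from the first non-empty memory map entry and account
 * for it as dynamically allocated kernel memory.
 */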
phys_bytes pg_alloc_page(kinfo_t *cbi)
{
        int m;
        multiboot_memory_map_t *mmap;

        assert(kernel_may_alloc);

        for(m = 0; m < cbi->mmap_size; m++) {
                mmap = &cbi->memmap[m];
                if(!mmap->mm_length) {
                        continue;
                }
                assert(mmap->mm_length > 0);
                assert(!(mmap->mm_length % ARM_PAGE_SIZE));
                assert(!(mmap->mm_base_addr % ARM_PAGE_SIZE));

                u32_t addr = mmap->mm_base_addr;
                mmap->mm_base_addr += ARM_PAGE_SIZE;
                mmap->mm_length -= ARM_PAGE_SIZE;

                cbi->kernel_allocated_bytes_dynamic += ARM_PAGE_SIZE;

                return addr;
        }

        panic("can't find free memory");
}

void pg_identity(kinfo_t *cbi)
{
        u32_t i;
        phys_bytes phys;

        /* We map memory that does not correspond to physical memory
         * as non-cacheable. Make sure we know what it is.
         */
        assert(cbi->mem_high_phys);

        /* Set up an identity mapping page directory */
        for(i = 0; i < ARM_VM_DIR_ENTRIES; i++) {
                u32_t flags = ARM_VM_SECTION
                        | ARM_VM_SECTION_USER
                        | ARM_VM_SECTION_DOMAIN;

                phys = i * ARM_SECTION_SIZE;
                /* mark normal memory as cacheable. TODO: fix hard coded values */
                if (phys >= PHYS_MEM_BEGIN && phys <= PHYS_MEM_END) {
                        pagedir[i] = phys | flags | ARM_VM_SECTION_CACHED;
                } else {
                        pagedir[i] = phys | flags | ARM_VM_SECTION_DEVICE;
                }
        }
}

int pg_mapkernel(void)
{
        int pde;
        u32_t mapped = 0, kern_phys = kern_phys_start;

        assert(!(kern_vir_start % ARM_SECTION_SIZE));
        assert(!(kern_phys_start % ARM_SECTION_SIZE));
        pde = kern_vir_start / ARM_SECTION_SIZE; /* start pde */
        while(mapped < kern_kernlen) {
                pagedir[pde] = (kern_phys & ARM_VM_SECTION_MASK)
                        | ARM_VM_SECTION
                        | ARM_VM_SECTION_SUPER
                        | ARM_VM_SECTION_DOMAIN
                        | ARM_VM_SECTION_CACHED;
                mapped += ARM_SECTION_SIZE;
                kern_phys += ARM_SECTION_SIZE;
                pde++;
        }

        return pde;     /* free pde */
}

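/* Program the ARM control registers: turn on the MMU, caches, branch
 * prediction and barriers, plus the Cortex-A8 L2 cache.
 */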
void vm_enable_paging(void)
{
        u32_t sctlr;
        u32_t actlr;

        write_ttbcr(0);

        /* Set all Domains to Client */
        write_dacr(0x55555555);

        sctlr = read_sctlr();

        /* Enable MMU */
        sctlr |= CPU_CONTROL_MMU_ENABLE;

        /* TRE set to zero (default reset value): TEX[2:0] are used, plus C and B bits. */
        sctlr &= ~CPU_CONTROL_TR_ENABLE;

        /* AFE set to zero (default reset value): not using simplified model. */
        sctlr &= ~CPU_CONTROL_AF_ENABLE;

        /* Enable instruction and data caches and branch prediction */
        sctlr |= CPU_CONTROL_DC_ENABLE;
        sctlr |= CPU_CONTROL_IC_ENABLE;
        sctlr |= CPU_CONTROL_BPRD_ENABLE;

        /* Enable barriers */
        sctlr |= CPU_CONTROL_32BD_ENABLE;

        /* Enable L2 cache (cortex-a8) */
#define CORTEX_A8_L2EN (0x02)
        actlr = read_actlr();
        actlr |= CORTEX_A8_L2EN;
        write_actlr(actlr);

        write_sctlr(sctlr);
}

phys_bytes pg_load(void)
{
        phys_bytes phpagedir = vir2phys(pagedir);
        write_ttbr0(phpagedir);
        return phpagedir;
}

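/* Clear the boot page directory so that every entry is invalid. */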
void pg_clear(void)
{
        memset(pagedir, 0, sizeof(pagedir));
}

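/* Round a physical address down to a page boundary. */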
phys_bytes pg_rounddown(phys_bytes b)
{
        phys_bytes o;
        if(!(o = b % ARM_PAGE_SIZE)) {
                return b;
        }
        return b - o;
}

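/* Map the virtual range [vaddr, vaddr_end) onto physical memory starting at
 * phys; if phys is PG_ALLOCATEME, back each page with a freshly allocated
 * one. Second-level page tables are allocated on demand.
 */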
void pg_map(phys_bytes phys, vir_bytes vaddr, vir_bytes vaddr_end,
        kinfo_t *cbi)
{
        static int mapped_pde = -1;
        static u32_t *pt = NULL;
        int pde, pte;

        assert(kernel_may_alloc);

        if(phys == PG_ALLOCATEME) {
                assert(!(vaddr % ARM_PAGE_SIZE));
        } else {
                assert((vaddr % ARM_PAGE_SIZE) == (phys % ARM_PAGE_SIZE));
                vaddr = pg_rounddown(vaddr);
                phys = pg_rounddown(phys);
        }
        assert(vaddr < kern_vir_start);

        while(vaddr < vaddr_end) {
                phys_bytes source = phys;
                assert(!(vaddr % ARM_PAGE_SIZE));
                if(phys == PG_ALLOCATEME) {
                        source = pg_alloc_page(cbi);
                } else {
                        assert(!(phys % ARM_PAGE_SIZE));
                }
                assert(!(source % ARM_PAGE_SIZE));
                pde = ARM_VM_PDE(vaddr);
                pte = ARM_VM_PTE(vaddr);
                if(mapped_pde < pde) {
                        phys_bytes ph;
                        pt = alloc_pagetable(&ph);
                        /* point this pde at the newly allocated page table */
                        pagedir[pde] = (ph & ARM_VM_PDE_MASK)
                                | ARM_VM_PAGEDIR
                                | ARM_VM_PDE_DOMAIN;
                        mapped_pde = pde;
                }
                assert(pt);
                /* map this page as a cached, user-accessible small page */
                pt[pte] = (source & ARM_VM_PTE_MASK)
                        | ARM_VM_PAGETABLE
                        | ARM_VM_PTE_CACHED
                        | ARM_VM_PTE_USER;
                vaddr += ARM_PAGE_SIZE;
                if(phys != PG_ALLOCATEME) {
                        phys += ARM_PAGE_SIZE;
                }
        }
}

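/* Report the physical and virtual addresses of the boot page directory. */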
void pg_info(reg_t *pagedir_ph, u32_t **pagedir_v)
{
        *pagedir_ph = vir2phys(pagedir);
        *pagedir_v = pagedir;
}