/* tangerine.git: arch/x86_64-pc/kernel/mmu.c */
#include <asm/cpu.h>

#include "kernel_intern.h"

/*
    The MMU pages and directories. They are stored at a fixed location and may be either reused by
    the 64-bit kernel or replaced by it. Four PDE directories (PDE2M structures) are enough to map
    the whole 4GB address space.
*/
static struct PML4E PML4[512]   __attribute__((used,aligned(4096)));
static struct PDPE  PDP[512]    __attribute__((used,aligned(4096)));
static struct PDE2M PDE[4][512] __attribute__((used,aligned(4096)));
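
/*
    Sizing check: each PDE2M table maps 512 x 2MB = 1GB, so the four tables above cover the full
    4GB. A single PDPE table reaches up to 512GB, and one PML4 entry is enough to point at it,
    which is why only PML4[0] is filled in below.
*/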

void core_SetupMMU()
{
    int i;
    struct PDE2M *pdes[] = { PDE[0], PDE[1], PDE[2], PDE[3] };

    rkprintf("[Kernel] Re-creating the MMU pages for first 4GB area\n");

    /* PML4 entry - only the first of the 512 entries is needed for the 4GB mapping */
    PML4[0].p  = 1;     /* present */
    PML4[0].rw = 1;     /* read/write */
    PML4[0].us = 1;     /* accessible for user */
    PML4[0].pwt= 0;     /* no write-through caching */
    PML4[0].pcd= 0;     /* cache enabled */
    PML4[0].a  = 0;     /* not yet accessed */
    PML4[0].mbz= 0;     /* must be zero */
    PML4[0].base_low = (unsigned long)PDP >> 12;
    PML4[0].avl= 0;
    PML4[0].nx = 0;
    PML4[0].avail = 0;
    PML4[0].base_high = 0;
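
    /*
        For reference, the walk set up here decodes a virtual address as:
        bits 47-39 PML4 index, bits 38-30 PDP index, bits 29-21 PDE index,
        bits 20-0 offset within the 2MB page (no PT level is used for 2MB pages).
    */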

    /*
        PDP entries. Four of them are used in order to define 2048 pages of 2MB each.
    */
    for (i=0; i < 4; i++)
    {
        int j;

        /* Set the PDP entry up and point to the PDE table; each entry covers 1GB */
        PDP[i].p  = 1;
        PDP[i].rw = 1;
        PDP[i].us = 1;
        PDP[i].pwt= 0;
        PDP[i].pcd= 0;
        PDP[i].a  = 0;
        PDP[i].mbz= 0;
        PDP[i].base_low = (unsigned long)pdes[i] >> 12;
        PDP[i].avl= 0;
        PDP[i].nx = 0;
        PDP[i].avail = 0;
        PDP[i].base_high = 0;

        for (j=0; j < 512; j++)
        {
            /* Set PDE entries - use 2MB memory pages, with full supervisor and user access */
            struct PDE2M *pde = pdes[i];

            pde[j].p  = 1;
            pde[j].rw = 1;
            pde[j].us = 1;
            pde[j].pwt= 0;
            pde[j].pcd= 0;
            pde[j].a  = 0;
            pde[j].d  = 0;
            pde[j].g  = 0;
            pde[j].pat= 0;
            pde[j].ps = 1;      /* 2MB page */
            pde[j].base_low = (((unsigned long)i << 30) + ((unsigned long)j << 21)) >> 13;
            pde[j].avl= 0;
            pde[j].avail = 0;
            pde[j].nx = 0;
            pde[j].base_high = 0;
        }
    }

    /* Load the new PML4; writing CR3 also flushes non-global TLB entries */
    wrcr(cr3, &PML4);
    rkprintf("[Kernel] PML4 @ %012p\n", &PML4);
}
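
/*
    A minimal sketch of the translation the tables above establish, assuming the identity mapping
    is still intact. The helper name is illustrative only and is not part of this kernel's API.
*/
static inline intptr_t core_TranslateIdentity(intptr_t addr)
{
    int i = (int)((addr >> 30) & 0x3);      /* which of the four PDE tables */
    int j = (int)((addr >> 21) & 0x1ff);    /* 2MB slot within that table */

    /* base_low holds bits 13 and up of the entry; shifting back yields the physical base */
    return ((intptr_t)PDE[i][j].base_low << 13) + (addr & 0x1fffff);
}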

static struct PTE Pages4K[32][512] __attribute__((used,aligned(4096)));
static int used_page;
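
/*
    Note: the 32 slots above allow at most 32 distinct 2MB pages to be split into 4K page tables
    (32 x 2MB = 64MB of splittable mappings); used_page is not range-checked below.
*/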

void core_ProtPage(intptr_t addr, char p, char rw, char us)
{
    struct PML4E *pml4 = (struct PML4E *)rdcr(cr3);
    struct PDPE *pdpe = (struct PDPE *)((intptr_t)pml4[(addr >> 39) & 0x1ff].base_low << 12);
    struct PDE4K *pde = (struct PDE4K *)((intptr_t)pdpe[(addr >> 30) & 0x1ff].base_low << 12);

    rkprintf("[Kernel] Changing protection of page %012p\n", addr);

    if (pde[(addr >> 21) & 0x1ff].ps)
    {
        /* The address lies in a 2MB page - split it into 512 4K pages first */
        struct PTE *pte = Pages4K[used_page++];
        struct PDE2M *pde2 = (struct PDE2M *)pde;

        /* work on a local copy of the affected PDE */
        struct PDE4K tmp_pde = pde[(addr >> 21) & 0x1ff];
        intptr_t base = (intptr_t)pde2[(addr >> 21) & 0x1ff].base_low << 13;
        int i;

        rkprintf("[Kernel] The page for address %012p was a big one. Splitting it into 4K pages\n",
                 addr);
        rkprintf("[Kernel] Base=%012p, pte=%012p\n", base, pte);

        /* Recreate the same 2MB mapping as 512 consecutive 4K entries */
        for (i = 0; i < 512; i++)
        {
            pte[i].p = 1;
            pte[i].rw = pde2[(addr >> 21) & 0x1ff].rw;
            pte[i].us = pde2[(addr >> 21) & 0x1ff].us;
            pte[i].pwt = pde2[(addr >> 21) & 0x1ff].pwt;
            pte[i].pcd = pde2[(addr >> 21) & 0x1ff].pcd;
            pte[i].base_low = base >> 12;
            base += 4096;
        }

        /* Turn the PDE from a 2MB page into a pointer at the new page table */
        tmp_pde.ps = 0;
        tmp_pde.base_low = ((intptr_t)pte) >> 12;

        pde[(addr >> 21) & 0x1ff] = tmp_pde;
    }

    struct PTE *pte = (struct PTE *)((intptr_t)pde[(addr >> 21) & 0x1ff].base_low << 12);
    pte[(addr >> 12) & 0x1ff].rw = rw ? 1 : 0;
    pte[(addr >> 12) & 0x1ff].us = us ? 1 : 0;
    pte[(addr >> 12) & 0x1ff].p  = p ? 1 : 0;
    asm volatile ("invlpg (%0)"::"r"(addr));
}
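
/*
    After a split, the 512 new PTEs cover exactly the 2MB range the old PDE mapped
    (512 x 4KB = 2MB), so the only visible change is the page-granular protection
    applied afterwards.
*/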

void core_ProtKernelArea(intptr_t addr, intptr_t length, char p, char rw, char us)
{
    rkprintf("[Kernel] Protecting area %012p-%012p\n", addr, addr + length - 1);

    while (length > 0)
    {
        core_ProtPage(addr, p, rw, us);
        addr += 4096;
        length -= 4096;
    }
}
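
/*
    A sketch of typical use, assuming linker-provided section bounds. The symbol names below are
    hypothetical and are not defined in this file:

        extern char _kernel_ro_start[], _kernel_ro_end[];

        // present, read-only, supervisor-only
        core_ProtKernelArea((intptr_t)_kernel_ro_start,
                            (intptr_t)(_kernel_ro_end - _kernel_ro_start),
                            1, 0, 0);
*/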