/*
 * This code is used on x86_64 to create page table identity mappings on
 * demand by building up a new set of page tables (or appending to the
 * existing ones), and then switching over to them when ready.
 */

/*
 * Since we're dealing with identity mappings, physical and virtual
 * addresses are the same, so override these defines which are ultimately
 * used by the headers in misc.h.
 */
#define __pa(x) ((unsigned long)(x))
#define __va(x) ((void *)((unsigned long)(x)))

#include "misc.h"

/* These actually do the work of building the kernel identity maps. */
#include <asm/init.h>
#include <asm/pgtable.h>
#include "../../mm/ident_map.c"
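
/*
 * Note: including ident_map.c here pulls in kernel_ident_mapping_init(),
 * which walks the PGD/PUD/PMD levels for a [start, end) physical range and
 * calls back into the alloc_pgt_page hook below whenever it needs a fresh
 * page table page, keeping the decompressor self-contained.
 */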

/* Used by pgtable.h asm code to force instruction serialization. */
unsigned long __force_order;
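
/*
 * (The cr3 accessors list __force_order as a dummy memory operand in their
 * asm constraints, which keeps the compiler from reordering the control
 * register reads and writes; the variable only needs to exist so those
 * operands have something to refer to.)
 */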

/* Used to track our page table allocation area. */
struct alloc_pgt_data {
	unsigned char *pgt_buf;
	unsigned long pgt_buf_size;
	unsigned long pgt_buf_offset;
};
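
/*
 * These three fields describe a simple bump allocator over the _pgtable
 * region set aside by the boot stub: pgt_buf points at the region,
 * pgt_buf_size is its length in bytes, and pgt_buf_offset is how much has
 * been handed out so far. Nothing is ever freed; the area is only needed
 * until the kernel proper installs its own page tables.
 */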

/*
 * Allocates space for a page table entry, using struct alloc_pgt_data
 * above. Besides the local callers, this is used as the allocation
 * callback in mapping_info below.
 */
static void *alloc_pgt_page(void *context)
{
	struct alloc_pgt_data *pages = (struct alloc_pgt_data *)context;
	unsigned char *entry;

	/* Validate there is space available for a new page. */
	if (pages->pgt_buf_offset >= pages->pgt_buf_size) {
		debug_putstr("out of pgt_buf in " __FILE__ "!?\n");
		debug_putaddr(pages->pgt_buf_offset);
		debug_putaddr(pages->pgt_buf_size);
		return NULL;
	}

	entry = pages->pgt_buf + pages->pgt_buf_offset;
	pages->pgt_buf_offset += PAGE_SIZE;

	return entry;
}
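
/*
 * For illustration (hypothetical numbers): with 4 KiB pages and a 16 KiB
 * pgt_buf, four consecutive calls return pgt_buf + 0x0000, + 0x1000,
 * + 0x2000 and + 0x3000; a fifth call finds pgt_buf_offset == pgt_buf_size
 * and returns NULL, which kernel_ident_mapping_init() treats as an
 * allocation failure (-ENOMEM).
 */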

/* Used to track our allocated page tables. */
static struct alloc_pgt_data pgt_data;

/* The top level page table entry pointer. */
static unsigned long level4p;
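
/*
 * "level4" is the top of the four-level paging hierarchy (the PML4 in
 * Intel's terminology); since these are identity mappings, level4p holds
 * both its address as a pointer and the physical address that eventually
 * gets written to cr3.
 */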

/* Locates and clears a region for a new top level page table. */
static void prepare_level4(void)
{
	/*
	 * It should be impossible for this not to already be true,
	 * but since calling this a second time would rewind the other
	 * counters, let's just make sure this is reset too.
	 */
	pgt_data.pgt_buf_offset = 0;

	/*
	 * If we came here via startup_32(), cr3 will be _pgtable already
	 * and we must append to the existing area instead of entirely
	 * overwriting it.
	 */
	level4p = read_cr3();
	if (level4p == (unsigned long)_pgtable) {
79 debug_putstr("booted via startup_32()\n");
80 pgt_data
.pgt_buf
= _pgtable
+ BOOT_INIT_PGT_SIZE
;
81 pgt_data
.pgt_buf_size
= BOOT_PGT_SIZE
- BOOT_INIT_PGT_SIZE
;
82 memset(pgt_data
.pgt_buf
, 0, pgt_data
.pgt_buf_size
);
84 debug_putstr("booted via startup_64()\n");
85 pgt_data
.pgt_buf
= _pgtable
;
86 pgt_data
.pgt_buf_size
= BOOT_PGT_SIZE
;
87 memset(pgt_data
.pgt_buf
, 0, pgt_data
.pgt_buf_size
);
88 level4p
= (unsigned long)alloc_pgt_page(&pgt_data
);
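
/*
 * A note on the split above: when startup_32() ran first, it already built
 * an initial identity mapping in the first BOOT_INIT_PGT_SIZE bytes of
 * _pgtable and loaded that into cr3, so only the remainder of the buffer
 * may be cleared and handed out; wiping the live tables would pull the
 * current mappings out from under us. On the startup_64() path the
 * bootloader's page tables are still in cr3, so the whole buffer is free
 * and a fresh top level page is allocated from it.
 */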

/*
 * Adds the specified range to what will become the new identity mappings.
 * Once all ranges have been added, the new mapping is activated by calling
 * finalize_identity_maps() below.
 */
void add_identity_map(unsigned long start, unsigned long size)
{
	struct x86_mapping_info mapping_info = {
		.alloc_pgt_page	= alloc_pgt_page,
		.context	= &pgt_data,
		.pmd_flag	= __PAGE_KERNEL_LARGE_EXEC,
	};
	unsigned long end = start + size;

	/* Make sure we have a top level page table ready to use. */
	if (!level4p)
		prepare_level4();

	/* Align boundary to 2M. */
	start = round_down(start, PMD_SIZE);
	end = round_up(end, PMD_SIZE);
	if (start >= end)
		return;

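	/*
	 * Worked example (hypothetical addresses): with 2 MiB pages,
	 * PMD_SIZE is 0x200000, so start = 0x1234567, size = 0x100000
	 * becomes start = round_down(0x1234567, 0x200000) = 0x1200000 and
	 * end = round_up(0x1334567, 0x200000) = 0x1400000, i.e. a single
	 * 2 MiB page table entry covers the whole request.
	 */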
	/* Build the mapping. */
	kernel_ident_mapping_init(&mapping_info, (pgd_t *)level4p,
				  start, end);
}
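
/*
 * Usage sketch (the identifiers below reflect the KASLR caller and are
 * illustrative, not a definition of the interface):
 *
 *	add_identity_map(random_addr, output_size);
 *	...
 *	finalize_identity_maps();
 *
 * Any number of ranges may be added before the final switch; each call
 * only allocates page table pages for levels not already present.
 */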

/*
 * This switches the page tables to the new level4 that has been built
 * via calls to add_identity_map() above. If booted via startup_32(),
 * this is effectively a no-op.
 */
void finalize_identity_maps(void)
{
	write_cr3(level4p);
}
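
/*
 * Note: the cr3 write above also flushes non-global TLB entries, so the
 * newly built mappings take effect immediately with no explicit invlpg.
 * On the startup_32() path cr3 is rewritten with the value it already
 * holds, hence "effectively a no-op" (the TLB flush still happens).
 */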