/*
 * Helper routines for building identity mapping page tables. This is
 * included by both the compressed kernel and the regular kernel.
 */
/* Fill 2 MiB PMD entries: virtual addr maps to physical (addr - info->offset). */
static void ident_pmd_init(struct x86_mapping_info *info, pmd_t *pmd_page,
			   unsigned long addr, unsigned long end)
{
	addr &= PMD_MASK;
	for (; addr < end; addr += PMD_SIZE) {
		pmd_t *pmd = pmd_page + pmd_index(addr);

		/* Leave entries that are already populated alone. */
		if (pmd_present(*pmd))
			continue;

		set_pmd(pmd, __pmd((addr - info->offset) | info->pmd_flag));
	}
}
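/*
 * Worked example (hypothetical values, not from the original file): with
 * info->offset == 0 and info->pmd_flag == __PAGE_KERNEL_LARGE_EXEC,
 * ident_pmd_init() writes, for addr == 0x40000000, the entry
 * __pmd(0x40000000 | __PAGE_KERNEL_LARGE_EXEC): a single 2 MiB page
 * (_PAGE_PSE is set) mapping virtual 0x40000000 to physical 0x40000000.
 */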
/* Fill PUD entries for [addr, end), allocating PMD pages where needed. */
static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
			  unsigned long addr, unsigned long end)
{
	unsigned long next;

	for (; addr < end; addr = next) {
		pud_t *pud = pud_page + pud_index(addr);
		pmd_t *pmd;

		next = (addr & PUD_MASK) + PUD_SIZE;
		if (next > end)
			next = end;

		/* Reuse the PMD page an already-present entry points to. */
		if (pud_present(*pud)) {
			pmd = pmd_offset(pud, 0);
			ident_pmd_init(info, pmd, addr, next);
			continue;
		}
		pmd = (pmd_t *)info->alloc_pgt_page(info->context);
		if (!pmd)
			return -ENOMEM;
		ident_pmd_init(info, pmd, addr, next);
		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
	}

	return 0;
}
/*
 * Map physical [pstart, pend) at virtual pstart + info->offset (a plain
 * identity mapping when offset is 0), reusing page tables that are already
 * present and allocating new ones through info->alloc_pgt_page().
 */
int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
			      unsigned long pstart, unsigned long pend)
{
	unsigned long addr = pstart + info->offset;
	unsigned long end = pend + info->offset;
	unsigned long next;
	int result;

	for (; addr < end; addr = next) {
		pgd_t *pgd = pgd_page + pgd_index(addr);
		pud_t *pud;

		next = (addr & PGDIR_MASK) + PGDIR_SIZE;
		if (next > end)
			next = end;

		if (pgd_present(*pgd)) {
			pud = pud_offset(pgd, 0);
			result = ident_pud_init(info, pud, addr, next);
			if (result)
				return result;
			continue;
		}

		pud = (pud_t *)info->alloc_pgt_page(info->context);
		if (!pud)
			return -ENOMEM;
		result = ident_pud_init(info, pud, addr, next);
		if (result)
			return result;
		set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE));
	}

	return 0;
}
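
/*
 * Usage sketch (illustrative only, not part of the original file): one way
 * a caller might drive kernel_ident_mapping_init().  The names
 * ident_pgt_pool, ident_alloc_pgt_page and ident_map_example, the pool
 * size, and the __PAGE_KERNEL_LARGE_EXEC choice for pmd_flag are
 * assumptions for this sketch; real callers (the compressed kernel, kexec,
 * etc.) supply their own allocator and flags via struct x86_mapping_info.
 * The allocator must return page-aligned, zeroed pages for which __pa()
 * yields a valid physical address.
 */
static char ident_pgt_pool[16 * PAGE_SIZE] __aligned(PAGE_SIZE);
static unsigned long ident_pgt_used;

/* Hand out one zeroed, page-aligned page per call from a static pool. */
static void *ident_alloc_pgt_page(void *context)
{
	char *pool = context;
	void *page;

	if (ident_pgt_used + PAGE_SIZE > sizeof(ident_pgt_pool))
		return NULL;	/* pool exhausted */

	page = pool + ident_pgt_used;
	ident_pgt_used += PAGE_SIZE;
	memset(page, 0, PAGE_SIZE);
	return page;
}

/* Identity-map [start, end) with 2 MiB pages into the given top-level table. */
static int ident_map_example(pgd_t *pgd_page, unsigned long start,
			     unsigned long end)
{
	struct x86_mapping_info info = {
		.alloc_pgt_page	= ident_alloc_pgt_page,
		.context	= ident_pgt_pool,
		.pmd_flag	= __PAGE_KERNEL_LARGE_EXEC,
		.offset		= 0,	/* virt == phys */
	};

	return kernel_ident_mapping_init(&info, pgd_page, start, end);
}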