/*
 * x86_64 specific EFI support functions
 * Based on Extensible Firmware Interface Specification version 1.0
 *
 * Copyright (C) 2005-2008 Intel Co.
 *	Fenghua Yu <fenghua.yu@intel.com>
 *	Bibo Mao <bibo.mao@intel.com>
 *	Chandramouli Narayanan <mouli@linux.intel.com>
 *	Huang Ying <ying.huang@intel.com>
 *
 * Code to convert the EFI memory map to an E820 map has been implemented
 * in the elilo bootloader, based on an EFI patch by Edgar Hucek. Based on
 * the E820 map, the page table is set up appropriately for EFI runtime code.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/bootmem.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/efi.h>
#include <linux/uaccess.h>
#include <linux/reboot.h>

#include <asm/setup.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/proto.h>

static pgd_t save_pgd __initdata;
static unsigned long efi_flags __initdata;

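/*
 * Walk the kernel direct mapping for [start, end) and mark each mapping
 * executable or non-executable (_PAGE_NX), stepping by 4K or 2M depending
 * on the page-table level that maps the address.
 */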
static void __init early_mapping_set_exec(unsigned long start,
					  unsigned long end,
					  int executable)
{
	pte_t *kpte;
	unsigned int level;

	while (start < end) {
		kpte = lookup_address((unsigned long)__va(start), &level);
		if (executable)
			set_pte(kpte, pte_mkexec(*kpte));
		else
			set_pte(kpte, __pte((pte_val(*kpte) | _PAGE_NX) & \
					    __supported_pte_mask));
		if (level == PG_LEVEL_4K)
			start = (start + PAGE_SIZE) & PAGE_MASK;
		else
			start = (start + PMD_SIZE) & PMD_MASK;
	}
}

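/*
 * Walk the EFI memory map and apply early_mapping_set_exec() to every
 * EFI_RUNTIME_SERVICES_CODE region.  Nothing to do if NX is not
 * supported, since every mapping is executable in that case.
 */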
static void __init early_runtime_code_mapping_set_exec(int executable)
{
	efi_memory_desc_t *md;
	void *p;

	if (!(__supported_pte_mask & _PAGE_NX))
		return;

	/* Make EFI runtime service code area executable */
	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
		md = p;
		if (md->type == EFI_RUNTIME_SERVICES_CODE) {
			unsigned long end;

			end = md->phys_addr + (md->num_pages << PAGE_SHIFT);
			early_mapping_set_exec(md->phys_addr, end, executable);
		}
	}
}

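/*
 * Prepare for an EFI call in physical mode: disable interrupts, make the
 * runtime service code executable, and copy the PGD entry that covers the
 * direct mapping of physical address 0 into PGD slot 0, so low physical
 * addresses remain mapped while firmware runs with physical addressing.
 */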
void __init efi_call_phys_prelog(void)
{
	unsigned long vaddress;

	local_irq_save(efi_flags);
	early_runtime_code_mapping_set_exec(1);
	vaddress = (unsigned long)__va(0x0UL);
	save_pgd = *pgd_offset_k(0x0UL);
	set_pgd(pgd_offset_k(0x0UL), *pgd_offset_k(vaddress));
	__flush_tlb_all();
}

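/*
 * Undo efi_call_phys_prelog(): restore the saved PGD entry for virtual
 * address 0, mark the runtime service code non-executable again and
 * re-enable interrupts.
 */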
void __init efi_call_phys_epilog(void)
{
	/*
	 * After the lock is released, the original page table is restored.
	 */
	set_pgd(pgd_offset_k(0x0UL), save_pgd);
	early_runtime_code_mapping_set_exec(0);
	__flush_tlb_all();
	local_irq_restore(efi_flags);
}

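/*
 * Reserve the physical pages holding the EFI memory map so the bootmem
 * allocator does not hand them out before the map has been consumed.
 */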
void __init efi_reserve_bootmem(void)
{
	reserve_bootmem_generic((unsigned long)memmap.phys_map,
				memmap.nr_map * memmap.desc_size);
}

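/*
 * Map a page-aligned physical range into the fixmap slots reserved for
 * EFI I/O (at most MAX_EFI_IO_PAGES pages in total) and return the
 * corresponding virtual address, or NULL if the request is misaligned
 * or would exceed the reserved slots.
 */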
void __iomem * __init efi_ioremap(unsigned long phys_addr, unsigned long size)
{
	static unsigned pages_mapped;
	unsigned i, pages;

	/* phys_addr and size must be page aligned */
	if ((phys_addr & ~PAGE_MASK) || (size & ~PAGE_MASK))
		return NULL;

	pages = size >> PAGE_SHIFT;
	if (pages_mapped + pages > MAX_EFI_IO_PAGES)
		return NULL;

	for (i = 0; i < pages; i++) {
		__set_fixmap(FIX_EFI_IO_MAP_FIRST_PAGE - pages_mapped,
			     phys_addr, PAGE_KERNEL);
		phys_addr += PAGE_SIZE;
		pages_mapped++;
	}

	return (void __iomem *)__fix_to_virt(FIX_EFI_IO_MAP_FIRST_PAGE - \
					     (pages_mapped - pages));
}