/*
 * Common EFI (Extensible Firmware Interface) support functions
 * Based on Extensible Firmware Interface Specification version 1.0
 *
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 * Copyright (C) 1999-2002 Hewlett-Packard Co.
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Stephane Eranian <eranian@hpl.hp.com>
 * Copyright (C) 2005-2008 Intel Co.
 *	Fenghua Yu <fenghua.yu@intel.com>
 *	Bibo Mao <bibo.mao@intel.com>
 *	Chandramouli Narayanan <mouli@linux.intel.com>
 *	Huang Ying <ying.huang@intel.com>
 * Copyright (C) 2013 SuSE Labs
 *	Borislav Petkov <bp@suse.de> - runtime services VA mapping
 *
 * Copied from efi_32.c to eliminate the duplicated code between EFI
 * 32/64 support code. --ying 2007-10-26
 *
 * All EFI Runtime Services are not implemented yet as EFI only
 * supports physical mode addressing on SoftSDV. This is to be fixed
 * in a future version. --drummond 1999-07-20
 *
 * Implemented EFI runtime services and virtual mode calls. --davidm
 *
 * Goutham Rao: <goutham.rao@intel.com>
 *	Skip non-WB memory and ignore empty memory ranges.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/efi.h>
#include <linux/efi-bgrt.h>
#include <linux/export.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/memblock.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/time.h>
#include <linux/reboot.h>
#include <linux/bcd.h>

#include <asm/setup.h>
#include <asm/efi.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/x86_init.h>
#include <asm/uv/uv.h>
struct efi_memory_map memmap;

static struct efi efi_phys __initdata;
static efi_system_table_t efi_systab __initdata;

static efi_config_table_type_t arch_tables[] __initdata = {
	{UV_SYSTEM_TABLE_GUID, "UVsystab", &efi.uv_systab},
	{NULL_GUID, NULL, NULL},
};

u64 efi_setup;		/* efi setup_data physical address */

static int add_efi_memmap __initdata;
static int __init setup_add_efi_memmap(char *arg)
{
	add_efi_memmap = 1;
	return 0;
}
early_param("add_efi_memmap", setup_add_efi_memmap);
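
/*
 * Call the firmware's SetVirtualAddressMap() service while still in
 * physical mode: a 1:1 mapping is set up via efi_call_phys_prolog() and
 * interrupts are disabled around the EFI call itself.
 */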
static efi_status_t __init phys_efi_set_virtual_address_map(
	unsigned long memory_map_size,
	unsigned long descriptor_size,
	u32 descriptor_version,
	efi_memory_desc_t *virtual_map)
{
	efi_status_t status;
	unsigned long flags;
	pgd_t *save_pgd;

	save_pgd = efi_call_phys_prolog();

	/* Disable interrupts around EFI calls: */
	local_irq_save(flags);
	status = efi_call_phys(efi_phys.set_virtual_address_map,
			       memory_map_size, descriptor_size,
			       descriptor_version, virtual_map);
	local_irq_restore(flags);

	efi_call_phys_epilog(save_pgd);

	return status;
}
void efi_get_time(struct timespec *now)
{
	efi_status_t status;
	efi_time_t eft;
	efi_time_cap_t cap;

	status = efi.get_time(&eft, &cap);
	if (status != EFI_SUCCESS)
		pr_err("Oops: efitime: can't read time!\n");

	now->tv_sec = mktime(eft.year, eft.month, eft.day, eft.hour,
			     eft.minute, eft.second);
	now->tv_nsec = 0;
}
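
/*
 * Scan the EFI memory map and mark any range advertised with the
 * EFI_MEMORY_MORE_RELIABLE attribute as mirrored in memblock, then
 * report how much of memory is mirrored.
 */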
void __init efi_find_mirror(void)
{
	void *p;
	u64 mirror_size = 0, total_size = 0;

	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
		efi_memory_desc_t *md = p;
		unsigned long long start = md->phys_addr;
		unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;

		total_size += size;
		if (md->attribute & EFI_MEMORY_MORE_RELIABLE) {
			memblock_mark_mirror(start, size);
			mirror_size += size;
		}
	}
	if (mirror_size)
		pr_info("Memory: %lldM/%lldM mirrored memory\n",
			mirror_size>>20, total_size>>20);
}
/*
 * Tell the kernel about the EFI memory map. This might include
 * more than the max 128 entries that can fit in the e820 legacy
 * (zeropage) memory map.
 */
static void __init do_add_efi_memmap(void)
{
	void *p;

	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
		efi_memory_desc_t *md = p;
		unsigned long long start = md->phys_addr;
		unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;
		int e820_type;

		switch (md->type) {
		case EFI_LOADER_CODE:
		case EFI_LOADER_DATA:
		case EFI_BOOT_SERVICES_CODE:
		case EFI_BOOT_SERVICES_DATA:
		case EFI_CONVENTIONAL_MEMORY:
			if (md->attribute & EFI_MEMORY_WB)
				e820_type = E820_RAM;
			else
				e820_type = E820_RESERVED;
			break;
		case EFI_ACPI_RECLAIM_MEMORY:
			e820_type = E820_ACPI;
			break;
		case EFI_ACPI_MEMORY_NVS:
			e820_type = E820_NVS;
			break;
		case EFI_UNUSABLE_MEMORY:
			e820_type = E820_UNUSABLE;
			break;
		case EFI_PERSISTENT_MEMORY:
			e820_type = E820_PMEM;
			break;
		default:
			/*
			 * EFI_RESERVED_TYPE EFI_RUNTIME_SERVICES_CODE
			 * EFI_RUNTIME_SERVICES_DATA EFI_MEMORY_MAPPED_IO
			 * EFI_MEMORY_MAPPED_IO_PORT_SPACE EFI_PAL_CODE
			 */
			e820_type = E820_RESERVED;
			break;
		}
		e820_add_region(start, size, e820_type);
	}
	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
}
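
/*
 * Reserve the memory occupied by the boot-time EFI memory map so it is
 * not reused before efi_memmap_init() gets a chance to map and parse it.
 */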
int __init efi_memblock_x86_reserve_range(void)
{
	struct efi_info *e = &boot_params.efi_info;
	u64 pmap;

	if (efi_enabled(EFI_PARAVIRT))
		return 0;

#ifdef CONFIG_X86_32
	/* Can't handle data above 4GB at this time */
	if (e->efi_memmap_hi) {
		pr_err("Memory map is above 4GB, disabling EFI.\n");
		return -EINVAL;
	}
	pmap = e->efi_memmap;
#else
	pmap = (e->efi_memmap | ((__u64)e->efi_memmap_hi << 32));
#endif

	memmap.phys_map		= pmap;
	memmap.nr_map		= e->efi_memmap_size /
				  e->efi_memdesc_size;
	memmap.desc_size	= e->efi_memdesc_size;
	memmap.desc_version	= e->efi_memdesc_version;

	memblock_reserve(pmap, memmap.nr_map * memmap.desc_size);

	efi.memmap = &memmap;

	return 0;
}
void __init efi_print_memmap(void)
{
#ifdef EFI_DEBUG
	efi_memory_desc_t *md;
	void *p;
	int i;

	for (p = memmap.map, i = 0;
	     p < memmap.map_end;
	     p += memmap.desc_size, i++) {
		char buf[64];

		md = p;
		pr_info("mem%02u: %s range=[0x%016llx-0x%016llx) (%lluMB)\n",
			i, efi_md_typeattr_format(buf, sizeof(buf), md),
			md->phys_addr,
			md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT),
			(md->num_pages >> (20 - EFI_PAGE_SHIFT)));
	}
#endif  /*  EFI_DEBUG  */
}
void __init efi_unmap_memmap(void)
{
	clear_bit(EFI_MEMMAP, &efi.flags);
	if (memmap.map) {
		early_memunmap(memmap.map, memmap.nr_map * memmap.desc_size);
		memmap.map = NULL;
	}
}
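
/*
 * Copy the firmware-provided 32-bit or 64-bit EFI system table into the
 * kernel's native efi_systab representation and sanity-check it. For a
 * kexec boot, fields saved in setup_data (efi_setup) take precedence.
 */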
static int __init efi_systab_init(void *phys)
{
	if (efi_enabled(EFI_64BIT)) {
		efi_system_table_64_t *systab64;
		struct efi_setup_data *data = NULL;
		u64 tmp = 0;

		if (efi_setup) {
			data = early_memremap(efi_setup, sizeof(*data));
			if (!data)
				return -ENOMEM;
		}
		systab64 = early_memremap((unsigned long)phys,
					  sizeof(*systab64));
		if (systab64 == NULL) {
			pr_err("Couldn't map the system table!\n");
			if (data)
				early_memunmap(data, sizeof(*data));
			return -ENOMEM;
		}

		efi_systab.hdr = systab64->hdr;
		efi_systab.fw_vendor = data ? (unsigned long)data->fw_vendor :
					      systab64->fw_vendor;
		tmp |= data ? data->fw_vendor : systab64->fw_vendor;
		efi_systab.fw_revision = systab64->fw_revision;
		efi_systab.con_in_handle = systab64->con_in_handle;
		tmp |= systab64->con_in_handle;
		efi_systab.con_in = systab64->con_in;
		tmp |= systab64->con_in;
		efi_systab.con_out_handle = systab64->con_out_handle;
		tmp |= systab64->con_out_handle;
		efi_systab.con_out = systab64->con_out;
		tmp |= systab64->con_out;
		efi_systab.stderr_handle = systab64->stderr_handle;
		tmp |= systab64->stderr_handle;
		efi_systab.stderr = systab64->stderr;
		tmp |= systab64->stderr;
		efi_systab.runtime = data ?
				     (void *)(unsigned long)data->runtime :
				     (void *)(unsigned long)systab64->runtime;
		tmp |= data ? data->runtime : systab64->runtime;
		efi_systab.boottime = (void *)(unsigned long)systab64->boottime;
		tmp |= systab64->boottime;
		efi_systab.nr_tables = systab64->nr_tables;
		efi_systab.tables = data ? (unsigned long)data->tables :
					   systab64->tables;
		tmp |= data ? data->tables : systab64->tables;

		early_memunmap(systab64, sizeof(*systab64));
		if (data)
			early_memunmap(data, sizeof(*data));
#ifdef CONFIG_X86_32
		if (tmp >> 32) {
			pr_err("EFI data located above 4GB, disabling EFI.\n");
			return -EINVAL;
		}
#endif
	} else {
		efi_system_table_32_t *systab32;

		systab32 = early_memremap((unsigned long)phys,
					  sizeof(*systab32));
		if (systab32 == NULL) {
			pr_err("Couldn't map the system table!\n");
			return -ENOMEM;
		}

		efi_systab.hdr = systab32->hdr;
		efi_systab.fw_vendor = systab32->fw_vendor;
		efi_systab.fw_revision = systab32->fw_revision;
		efi_systab.con_in_handle = systab32->con_in_handle;
		efi_systab.con_in = systab32->con_in;
		efi_systab.con_out_handle = systab32->con_out_handle;
		efi_systab.con_out = systab32->con_out;
		efi_systab.stderr_handle = systab32->stderr_handle;
		efi_systab.stderr = systab32->stderr;
		efi_systab.runtime = (void *)(unsigned long)systab32->runtime;
		efi_systab.boottime = (void *)(unsigned long)systab32->boottime;
		efi_systab.nr_tables = systab32->nr_tables;
		efi_systab.tables = systab32->tables;

		early_memunmap(systab32, sizeof(*systab32));
	}

	efi.systab = &efi_systab;

	/*
	 * Verify the EFI Table
	 */
	if (efi.systab->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE) {
		pr_err("System table signature incorrect!\n");
		return -EINVAL;
	}
	if ((efi.systab->hdr.revision >> 16) == 0)
		pr_err("Warning: System table version %d.%02d, expected 1.00 or greater!\n",
		       efi.systab->hdr.revision >> 16,
		       efi.systab->hdr.revision & 0xffff);

	set_bit(EFI_SYSTEM_TABLES, &efi.flags);

	return 0;
}
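
/*
 * The 32-bit and 64-bit helpers below only extract the physical address
 * of SetVirtualAddressMap() from the runtime services table; everything
 * else is called through the virtual mapping once it is established.
 */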
static int __init efi_runtime_init32(void)
{
	efi_runtime_services_32_t *runtime;

	runtime = early_memremap((unsigned long)efi.systab->runtime,
				 sizeof(efi_runtime_services_32_t));
	if (!runtime) {
		pr_err("Could not map the runtime service table!\n");
		return -ENOMEM;
	}

	/*
	 * We will only need *early* access to the SetVirtualAddressMap
	 * EFI runtime service. All other runtime services will be called
	 * via the virtual mapping.
	 */
	efi_phys.set_virtual_address_map =
			(efi_set_virtual_address_map_t *)
			(unsigned long)runtime->set_virtual_address_map;
	early_memunmap(runtime, sizeof(efi_runtime_services_32_t));

	return 0;
}
static int __init efi_runtime_init64(void)
{
	efi_runtime_services_64_t *runtime;

	runtime = early_memremap((unsigned long)efi.systab->runtime,
				 sizeof(efi_runtime_services_64_t));
	if (!runtime) {
		pr_err("Could not map the runtime service table!\n");
		return -ENOMEM;
	}

	/*
	 * We will only need *early* access to the SetVirtualAddressMap
	 * EFI runtime service. All other runtime services will be called
	 * via the virtual mapping.
	 */
	efi_phys.set_virtual_address_map =
			(efi_set_virtual_address_map_t *)
			(unsigned long)runtime->set_virtual_address_map;
	early_memunmap(runtime, sizeof(efi_runtime_services_64_t));

	return 0;
}
static int __init efi_runtime_init(void)
{
	int rv;

	/*
	 * Check out the runtime services table. We need to map
	 * the runtime services table so that we can grab the physical
	 * address of several of the EFI runtime functions, needed to
	 * set the firmware into virtual mode.
	 *
	 * When EFI_PARAVIRT is in force we cannot map the runtime
	 * service memory region because we do not have direct access to it.
	 * However, runtime services are available through proxy functions
	 * (e.g. in the Xen dom0 EFI implementation they invoke a special
	 * hypercall which executes the relevant EFI functions), and that
	 * is why they are always enabled.
	 */
	if (!efi_enabled(EFI_PARAVIRT)) {
		if (efi_enabled(EFI_64BIT))
			rv = efi_runtime_init64();
		else
			rv = efi_runtime_init32();

		if (rv)
			return rv;
	}

	set_bit(EFI_RUNTIME_SERVICES, &efi.flags);

	return 0;
}
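
/*
 * Map the EFI memory map reserved in efi_memblock_x86_reserve_range() so
 * that it can be iterated, and optionally feed it into the e820 table.
 */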
static int __init efi_memmap_init(void)
{
	if (efi_enabled(EFI_PARAVIRT))
		return 0;

	/* Map the EFI memory map */
	memmap.map = early_memremap((unsigned long)memmap.phys_map,
				    memmap.nr_map * memmap.desc_size);
	if (memmap.map == NULL) {
		pr_err("Could not map the memory map!\n");
		return -ENOMEM;
	}
	memmap.map_end = memmap.map + (memmap.nr_map * memmap.desc_size);

	if (add_efi_memmap)
		do_add_efi_memmap();

	set_bit(EFI_MEMMAP, &efi.flags);

	return 0;
}
void __init efi_init(void)
{
	efi_char16_t *c16;
	char vendor[100] = "unknown";
	int i = 0;
	void *tmp;

#ifdef CONFIG_X86_32
	if (boot_params.efi_info.efi_systab_hi ||
	    boot_params.efi_info.efi_memmap_hi) {
		pr_info("Table located above 4GB, disabling EFI.\n");
		return;
	}
	efi_phys.systab = (efi_system_table_t *)boot_params.efi_info.efi_systab;
#else
	efi_phys.systab = (efi_system_table_t *)
			  (boot_params.efi_info.efi_systab |
			  ((__u64)boot_params.efi_info.efi_systab_hi<<32));
#endif

	if (efi_systab_init(efi_phys.systab))
		return;

	efi.config_table = (unsigned long)efi.systab->tables;
	efi.fw_vendor	 = (unsigned long)efi.systab->fw_vendor;
	efi.runtime	 = (unsigned long)efi.systab->runtime;

	/*
	 * Show what we know for posterity
	 */
	c16 = tmp = early_memremap(efi.systab->fw_vendor, 2);
	if (c16) {
		for (i = 0; i < sizeof(vendor) - 1 && *c16; ++i)
			vendor[i] = *c16++;
		vendor[i] = '\0';
	} else
		pr_err("Could not map the firmware vendor!\n");
	early_memunmap(tmp, 2);

	pr_info("EFI v%u.%.02u by %s\n",
		efi.systab->hdr.revision >> 16,
		efi.systab->hdr.revision & 0xffff, vendor);

	if (efi_reuse_config(efi.systab->tables, efi.systab->nr_tables))
		return;

	if (efi_config_init(arch_tables))
		return;

	/*
	 * Note: We currently don't support runtime services on an EFI
	 * that doesn't match the kernel 32/64-bit mode.
	 */
	if (!efi_runtime_supported())
		pr_info("No EFI runtime due to 32/64-bit mismatch with kernel\n");
	else {
		if (efi_runtime_disabled() || efi_runtime_init())
			return;
	}
	if (efi_memmap_init())
		return;

	if (efi_enabled(EFI_DBG))
		efi_print_memmap();
}
void __init efi_late_init(void)
{
	efi_bgrt_init();
}
void __init efi_set_executable(efi_memory_desc_t *md, bool executable)
{
	u64 addr, npages;

	addr = md->virt_addr;
	npages = md->num_pages;

	memrange_efi_to_native(&addr, &npages);

	if (executable)
		set_memory_x(addr, npages);
	else
		set_memory_nx(addr, npages);
}
void __init runtime_code_page_mkexec(void)
{
	efi_memory_desc_t *md;
	void *p;

	/* Make EFI runtime service code area executable */
	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
		md = p;

		if (md->type != EFI_RUNTIME_SERVICES_CODE)
			continue;

		efi_set_executable(md, true);
	}
}
void __init efi_memory_uc(u64 addr, unsigned long size)
{
	unsigned long page_shift = 1UL << EFI_PAGE_SHIFT;
	u64 npages;

	npages = round_up(size, page_shift) / page_shift;
	memrange_efi_to_native(&addr, &npages);
	set_memory_uc(addr, npages);
}
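
/*
 * Legacy (efi=old_map) mapping of a single descriptor: reuse the kernel's
 * direct mapping when the range is already mapped, otherwise ioremap it,
 * and record the resulting virtual address in the descriptor.
 */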
void __init old_map_region(efi_memory_desc_t *md)
{
	u64 start_pfn, end_pfn, end;
	unsigned long size;
	void *va;

	start_pfn = PFN_DOWN(md->phys_addr);
	size	  = md->num_pages << PAGE_SHIFT;
	end	  = md->phys_addr + size;
	end_pfn	  = PFN_UP(end);

	if (pfn_range_is_mapped(start_pfn, end_pfn)) {
		va = __va(md->phys_addr);

		if (!(md->attribute & EFI_MEMORY_WB))
			efi_memory_uc((u64)(unsigned long)va, size);
	} else
		va = efi_ioremap(md->phys_addr, size,
				 md->type, md->attribute);

	md->virt_addr = (u64) (unsigned long) va;
	if (!va)
		pr_err("ioremap of 0x%llX failed!\n",
		       (unsigned long long)md->phys_addr);
}
/* Merge contiguous regions of the same type and attribute */
static void __init efi_merge_regions(void)
{
	void *p;
	efi_memory_desc_t *md, *prev_md = NULL;

	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
		u64 prev_size;

		md = p;

		if (!prev_md) {
			prev_md = md;
			continue;
		}

		if (prev_md->type != md->type ||
		    prev_md->attribute != md->attribute) {
			prev_md = md;
			continue;
		}

		prev_size = prev_md->num_pages << EFI_PAGE_SHIFT;

		if (md->phys_addr == (prev_md->phys_addr + prev_size)) {
			prev_md->num_pages += md->num_pages;
			md->type = EFI_RESERVED_TYPE;
			md->attribute = 0;
			continue;
		}
		prev_md = md;
	}
}
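
/*
 * If this descriptor covers the physical EFI system table, compute the
 * table's new virtual address from the descriptor's phys->virt offset.
 */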
static void __init get_systab_virt_addr(efi_memory_desc_t *md)
{
	u64 systab, size, end;

	size = md->num_pages << EFI_PAGE_SHIFT;
	end = md->phys_addr + size;
	systab = (u64)(unsigned long)efi_phys.systab;
	if (md->phys_addr <= systab && systab < end) {
		systab += md->virt_addr - md->phys_addr;
		efi.systab = (efi_system_table_t *)(unsigned long)systab;
	}
}
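
/*
 * Save the runtime-related memory map entries so they can be handed to a
 * kexec'd kernel via efi_runtime_map_setup().
 */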
static void __init save_runtime_map(void)
{
#ifdef CONFIG_KEXEC_CORE
	efi_memory_desc_t *md;
	void *tmp, *p, *q = NULL;
	int count = 0;

	if (efi_enabled(EFI_OLD_MEMMAP))
		return;

	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
		md = p;

		if (!(md->attribute & EFI_MEMORY_RUNTIME) ||
		    (md->type == EFI_BOOT_SERVICES_CODE) ||
		    (md->type == EFI_BOOT_SERVICES_DATA))
			continue;
		tmp = krealloc(q, (count + 1) * memmap.desc_size, GFP_KERNEL);
		if (!tmp)
			goto out;
		q = tmp;

		memcpy(q + count * memmap.desc_size, md, memmap.desc_size);
		count++;
	}

	efi_runtime_map_setup(q, count, memmap.desc_size);
	return;

out:
	kfree(q);
	pr_err("Error saving runtime map, efi runtime on kexec non-functional!!\n");
#endif
}
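
/*
 * Grow the scratch copy of the memory map by doubling the page order,
 * copying the old contents (if any) and freeing the old allocation.
 */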
static void *realloc_pages(void *old_memmap, int old_shift)
{
	void *ret;

	ret = (void *)__get_free_pages(GFP_KERNEL, old_shift + 1);
	if (!ret)
		goto out;

	/*
	 * A first-time allocation doesn't have anything to copy.
	 */
	if (!old_memmap)
		return ret;

	memcpy(ret, old_memmap, PAGE_SIZE << old_shift);

out:
	free_pages((unsigned long)old_memmap, old_shift);
	return ret;
}
/*
 * Iterate the EFI memory map in reverse order because the regions
 * will be mapped top-down. The end result is the same as if we had
 * mapped things forward, but doesn't require us to change the
 * existing implementation of efi_map_region().
 */
static inline void *efi_map_next_entry_reverse(void *entry)
{
	/* Initial call */
	if (!entry)
		return memmap.map_end - memmap.desc_size;

	entry -= memmap.desc_size;
	if (entry < memmap.map)
		return NULL;

	return entry;
}
/*
 * efi_map_next_entry - Return the next EFI memory map descriptor
 * @entry: Previous EFI memory map descriptor
 *
 * This is a helper function to iterate over the EFI memory map, which
 * we do in different orders depending on the current configuration.
 *
 * To begin traversing the memory map @entry must be %NULL.
 *
 * Returns %NULL when we reach the end of the memory map.
 */
static void *efi_map_next_entry(void *entry)
{
	if (!efi_enabled(EFI_OLD_MEMMAP) && efi_enabled(EFI_64BIT)) {
		/*
		 * Starting in UEFI v2.5 the EFI_PROPERTIES_TABLE
		 * config table feature requires us to map all entries
		 * in the same order as they appear in the EFI memory
		 * map. That is to say, entry N must have a lower
		 * virtual address than entry N+1. This is because the
		 * firmware toolchain leaves relative references in
		 * the code/data sections, which are split and become
		 * separate EFI memory regions. Mapping things
		 * out-of-order leads to the firmware accessing
		 * unmapped addresses.
		 *
		 * Since we need to map things this way whether or not
		 * the kernel actually makes use of
		 * EFI_PROPERTIES_TABLE, let's just switch to this
		 * scheme by default for 64-bit.
		 */
		return efi_map_next_entry_reverse(entry);
	}

	/* Initial call */
	if (!entry)
		return memmap.map;

	entry += memmap.desc_size;
	if (entry >= memmap.map_end)
		return NULL;

	return entry;
}
/*
 * Map the efi memory ranges of the runtime services and update new_mmap with
 * virtual addresses.
 */
static void * __init efi_map_regions(int *count, int *pg_shift)
{
	void *p, *new_memmap = NULL;
	unsigned long left = 0;
	efi_memory_desc_t *md;

	p = NULL;
	while ((p = efi_map_next_entry(p))) {
		md = p;
		if (!(md->attribute & EFI_MEMORY_RUNTIME)) {
#ifdef CONFIG_X86_64
			if (md->type != EFI_BOOT_SERVICES_CODE &&
			    md->type != EFI_BOOT_SERVICES_DATA)
#endif
				continue;
		}

		efi_map_region(md);
		get_systab_virt_addr(md);

		if (left < memmap.desc_size) {
			new_memmap = realloc_pages(new_memmap, *pg_shift);
			if (!new_memmap)
				return NULL;

			left += PAGE_SIZE << *pg_shift;
			(*pg_shift)++;
		}

		memcpy(new_memmap + (*count * memmap.desc_size), md,
		       memmap.desc_size);

		left -= memmap.desc_size;
		(*count)++;
	}

	return new_memmap;
}
static void __init kexec_enter_virtual_mode(void)
{
#ifdef CONFIG_KEXEC_CORE
	efi_memory_desc_t *md;
	void *p;

	/*
	 * We don't do virtual mode, since we don't do runtime services, on
	 * non-native EFI.
	 */
	if (!efi_is_native()) {
		clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
		return;
	}

	/*
	 * Map efi regions which were passed via setup_data. The virt_addr is a
	 * fixed addr which was used in the first kernel of a kexec boot.
	 */
	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
		md = p;
		efi_map_region_fixed(md); /* FIXME: add error handling */
		get_systab_virt_addr(md);
	}

	save_runtime_map();

	efi_sync_low_kernel_mappings();

	/*
	 * Now that EFI is in virtual mode, update the function
	 * pointers in the runtime service table to the new virtual addresses.
	 *
	 * Call EFI services through wrapper functions.
	 */
	efi.runtime_version = efi_systab.hdr.revision;

	efi_native_runtime_setup();

	efi.set_virtual_address_map = NULL;

	if (efi_enabled(EFI_OLD_MEMMAP) && (__supported_pte_mask & _PAGE_NX))
		runtime_code_page_mkexec();

	/* clean DUMMY object */
	efi_delete_dummy_variable();
#endif
}
/*
 * This function will switch the EFI runtime services to virtual mode.
 * Essentially, we look through the EFI memmap and map every region that
 * has the runtime attribute bit set in its memory descriptor into the
 * ->trampoline_pgd page table using a top-down VA allocation scheme.
 *
 * The old method, which used to update that memory descriptor with the
 * virtual address obtained from ioremap(), is still supported when the
 * kernel is booted with efi=old_map on its command line. That old
 * method enabled the runtime services to be called without having to
 * thunk back into physical mode for every invocation.
 *
 * The new method does a pagetable switch in a preemption-safe manner
 * so that we're in a different address space when calling a runtime
 * function. For function argument passing we copy the PGDs of the
 * kernel page table into ->trampoline_pgd prior to each call.
 *
 * Specifically for kexec boot, the efi runtime maps of the previous
 * kernel should be passed in via setup_data. In that case runtime ranges
 * will be mapped to the same virtual addresses as in the first kernel,
 * see kexec_enter_virtual_mode().
 */
static void __init __efi_enter_virtual_mode(void)
{
	int count = 0, pg_shift = 0;
	void *new_memmap = NULL;
	efi_status_t status;

	efi_merge_regions();
	new_memmap = efi_map_regions(&count, &pg_shift);
	if (!new_memmap) {
		pr_err("Error reallocating memory, EFI runtime non-functional!\n");
		clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
		return;
	}

	save_runtime_map();

	if (efi_setup_page_tables(__pa(new_memmap), 1 << pg_shift)) {
		clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
		return;
	}

	efi_sync_low_kernel_mappings();
	efi_dump_pagetable();

	if (efi_is_native()) {
		status = phys_efi_set_virtual_address_map(
				memmap.desc_size * count,
				memmap.desc_size,
				memmap.desc_version,
				(efi_memory_desc_t *)__pa(new_memmap));
	} else {
		status = efi_thunk_set_virtual_address_map(
				efi_phys.set_virtual_address_map,
				memmap.desc_size * count,
				memmap.desc_size,
				memmap.desc_version,
				(efi_memory_desc_t *)__pa(new_memmap));
	}

	if (status != EFI_SUCCESS) {
		pr_alert("Unable to switch EFI into virtual mode (status=%lx)!\n",
			 status);
		panic("EFI call to SetVirtualAddressMap() failed!");
	}

	/*
	 * Now that EFI is in virtual mode, update the function
	 * pointers in the runtime service table to the new virtual addresses.
	 *
	 * Call EFI services through wrapper functions.
	 */
	efi.runtime_version = efi_systab.hdr.revision;

	if (efi_is_native())
		efi_native_runtime_setup();
	else
		efi_thunk_runtime_setup();

	efi.set_virtual_address_map = NULL;

	efi_runtime_mkexec();

	/*
	 * We mapped the descriptor array into the EFI pagetable above but we're
	 * not unmapping it here. Here's why:
	 *
	 * We're copying select PGDs from the kernel page table to the EFI page
	 * table and when we do so and make changes to those PGDs like unmapping
	 * stuff from them, those changes appear in the kernel page table and we
	 * go boom.
	 *
	 * From setup_real_mode():
	 *
	 * trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd;
	 *
	 * In this particular case, our allocation is in PGD 0 of the EFI page
	 * table but we've copied that PGD from PGD[272] of the EFI page table:
	 *
	 *	pgd_index(__PAGE_OFFSET = 0xffff880000000000) = 272
	 *
	 * where the direct memory mapping in kernel space is.
	 *
	 * new_memmap's VA comes from that direct mapping, so clearing it here
	 * would clear it in the kernel page table too.
	 *
	 * efi_cleanup_page_tables(__pa(new_memmap), 1 << pg_shift);
	 */
	free_pages((unsigned long)new_memmap, pg_shift);

	/* clean DUMMY object */
	efi_delete_dummy_variable();
}
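
/*
 * For a kexec boot (efi_setup != 0) reuse the fixed virtual addresses of
 * the previous kernel, otherwise build a fresh virtual mapping.
 */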
void __init efi_enter_virtual_mode(void)
{
	if (efi_enabled(EFI_PARAVIRT))
		return;

	if (efi_setup)
		kexec_enter_virtual_mode();
	else
		__efi_enter_virtual_mode();
}
/*
 * Convenience functions to obtain memory types and attributes
 */
u32 efi_mem_type(unsigned long phys_addr)
{
	efi_memory_desc_t *md;
	void *p;

	if (!efi_enabled(EFI_MEMMAP))
		return 0;

	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
		md = p;
		if ((md->phys_addr <= phys_addr) &&
		    (phys_addr < (md->phys_addr +
				  (md->num_pages << EFI_PAGE_SHIFT))))
			return md->type;
	}
	return 0;
}
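
/* Parse the "efi=" kernel command line parameter. */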
static int __init arch_parse_efi_cmdline(char *str)
{
	if (!str) {
		pr_warn("need at least one option\n");
		return -EINVAL;
	}

	if (parse_option_str(str, "old_map"))
		set_bit(EFI_OLD_MEMMAP, &efi.flags);

	return 0;
}
early_param("efi", arch_parse_efi_cmdline);