/*
 * Common EFI (Extensible Firmware Interface) support functions
 * Based on Extensible Firmware Interface Specification version 1.0
 *
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 * Copyright (C) 1999-2002 Hewlett-Packard Co.
 *      David Mosberger-Tang <davidm@hpl.hp.com>
 *      Stephane Eranian <eranian@hpl.hp.com>
 * Copyright (C) 2005-2008 Intel Co.
 *      Fenghua Yu <fenghua.yu@intel.com>
 *      Bibo Mao <bibo.mao@intel.com>
 *      Chandramouli Narayanan <mouli@linux.intel.com>
 *      Huang Ying <ying.huang@intel.com>
 * Copyright (C) 2013 SuSE Labs
 *      Borislav Petkov <bp@suse.de> - runtime services VA mapping
 *
 * Copied from efi_32.c to eliminate the duplicated code between EFI
 * 32/64 support code. --ying 2007-10-26
 *
 * Not all EFI Runtime Services are implemented yet, as EFI only
 * supports physical mode addressing on SoftSDV. This is to be fixed
 * in a future version. --drummond 1999-07-20
 *
 * Implemented EFI runtime services and virtual mode calls. --davidm
 *
 * Goutham Rao: <goutham.rao@intel.com>
 *      Skip non-WB memory and ignore empty memory ranges.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/efi.h>
#include <linux/efi-bgrt.h>
#include <linux/export.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/memblock.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/time.h>
#include <linux/io.h>
#include <linux/reboot.h>
#include <linux/bcd.h>

#include <asm/setup.h>
#include <asm/efi.h>
#include <asm/time.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/x86_init.h>
#include <asm/rtc.h>
#include <asm/uv/uv.h>

static struct efi efi_phys __initdata;
static efi_system_table_t efi_systab __initdata;

static efi_config_table_type_t arch_tables[] __initdata = {
#ifdef CONFIG_X86_UV
        {UV_SYSTEM_TABLE_GUID, "UVsystab", &efi.uv_systab},
#endif
        {NULL_GUID, NULL, NULL},
};

u64 efi_setup;  /* efi setup_data physical address */

static int add_efi_memmap __initdata;
static int __init setup_add_efi_memmap(char *arg)
{
        add_efi_memmap = 1;
        return 0;
}
early_param("add_efi_memmap", setup_add_efi_memmap);
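
/*
 * Call the firmware's SetVirtualAddressMap() service while it is still
 * mapped 1:1, using the physical-mode call wrappers and with interrupts
 * disabled around the call.
 */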
static efi_status_t __init phys_efi_set_virtual_address_map(
        unsigned long memory_map_size,
        unsigned long descriptor_size,
        u32 descriptor_version,
        efi_memory_desc_t *virtual_map)
{
        efi_status_t status;
        unsigned long flags;
        pgd_t *save_pgd;

        save_pgd = efi_call_phys_prolog();

        /* Disable interrupts around EFI calls: */
        local_irq_save(flags);
        status = efi_call_phys(efi_phys.set_virtual_address_map,
                               memory_map_size, descriptor_size,
                               descriptor_version, virtual_map);
        local_irq_restore(flags);

        efi_call_phys_epilog(save_pgd);

        return status;
}
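
/* Read the current time of day from the EFI GetTime() runtime service. */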
void efi_get_time(struct timespec *now)
{
        efi_status_t status;
        efi_time_t eft;
        efi_time_cap_t cap;

        status = efi.get_time(&eft, &cap);
        if (status != EFI_SUCCESS)
                pr_err("Oops: efitime: can't read time!\n");

        now->tv_sec = mktime(eft.year, eft.month, eft.day, eft.hour,
                             eft.minute, eft.second);
        now->tv_nsec = 0;
}
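
/*
 * Scan the EFI memory map for regions flagged EFI_MEMORY_MORE_RELIABLE
 * and mark them as mirrored memory in memblock.
 */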
void __init efi_find_mirror(void)
{
        efi_memory_desc_t *md;
        u64 mirror_size = 0, total_size = 0;

        for_each_efi_memory_desc(md) {
                unsigned long long start = md->phys_addr;
                unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;

                total_size += size;
                if (md->attribute & EFI_MEMORY_MORE_RELIABLE) {
                        memblock_mark_mirror(start, size);
                        mirror_size += size;
                }
        }
        if (mirror_size)
                pr_info("Memory: %lldM/%lldM mirrored memory\n",
                        mirror_size>>20, total_size>>20);
}

/*
 * Tell the kernel about the EFI memory map. This might include
 * more than the max 128 entries that can fit in the e820 legacy
 * (zeropage) memory map.
 */
static void __init do_add_efi_memmap(void)
{
        efi_memory_desc_t *md;

        for_each_efi_memory_desc(md) {
                unsigned long long start = md->phys_addr;
                unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;
                int e820_type;

                switch (md->type) {
                case EFI_LOADER_CODE:
                case EFI_LOADER_DATA:
                case EFI_BOOT_SERVICES_CODE:
                case EFI_BOOT_SERVICES_DATA:
                case EFI_CONVENTIONAL_MEMORY:
                        if (md->attribute & EFI_MEMORY_WB)
                                e820_type = E820_RAM;
                        else
                                e820_type = E820_RESERVED;
                        break;
                case EFI_ACPI_RECLAIM_MEMORY:
                        e820_type = E820_ACPI;
                        break;
                case EFI_ACPI_MEMORY_NVS:
                        e820_type = E820_NVS;
                        break;
                case EFI_UNUSABLE_MEMORY:
                        e820_type = E820_UNUSABLE;
                        break;
                case EFI_PERSISTENT_MEMORY:
                        e820_type = E820_PMEM;
                        break;
                default:
                        /*
                         * EFI_RESERVED_TYPE EFI_RUNTIME_SERVICES_CODE
                         * EFI_RUNTIME_SERVICES_DATA EFI_MEMORY_MAPPED_IO
                         * EFI_MEMORY_MAPPED_IO_PORT_SPACE EFI_PAL_CODE
                         */
                        e820_type = E820_RESERVED;
                        break;
                }
                e820_add_region(start, size, e820_type);
        }
        sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
}
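
/*
 * Record where the firmware placed the EFI memory map (from boot_params)
 * and reserve that range in memblock so it is not clobbered during early
 * boot.
 */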
int __init efi_memblock_x86_reserve_range(void)
{
        struct efi_info *e = &boot_params.efi_info;
        phys_addr_t pmap;

        if (efi_enabled(EFI_PARAVIRT))
                return 0;

#ifdef CONFIG_X86_32
        /* Can't handle data above 4GB at this time */
        if (e->efi_memmap_hi) {
                pr_err("Memory map is above 4GB, disabling EFI.\n");
                return -EINVAL;
        }
        pmap = e->efi_memmap;
#else
        pmap = (e->efi_memmap | ((__u64)e->efi_memmap_hi << 32));
#endif
        efi.memmap.phys_map = pmap;
        efi.memmap.nr_map = e->efi_memmap_size /
                            e->efi_memdesc_size;
        efi.memmap.desc_size = e->efi_memdesc_size;
        efi.memmap.desc_version = e->efi_memdesc_version;

        WARN(efi.memmap.desc_version != 1,
             "Unexpected EFI_MEMORY_DESCRIPTOR version %ld",
             efi.memmap.desc_version);

        memblock_reserve(pmap, efi.memmap.nr_map * efi.memmap.desc_size);

        return 0;
}
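
/* Dump each EFI memory map descriptor: type, attributes, range and size. */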
void __init efi_print_memmap(void)
{
        efi_memory_desc_t *md;
        int i = 0;

        for_each_efi_memory_desc(md) {
                char buf[64];

                pr_info("mem%02u: %s range=[0x%016llx-0x%016llx] (%lluMB)\n",
                        i++, efi_md_typeattr_format(buf, sizeof(buf), md),
                        md->phys_addr,
                        md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - 1,
                        (md->num_pages >> (20 - EFI_PAGE_SHIFT)));
        }
}
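
/* Tear down the early mapping of the EFI memory map. */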
void __init efi_unmap_memmap(void)
{
        unsigned long size;

        clear_bit(EFI_MEMMAP, &efi.flags);

        size = efi.memmap.nr_map * efi.memmap.desc_size;
        if (efi.memmap.map) {
                early_memunmap(efi.memmap.map, size);
                efi.memmap.map = NULL;
        }
}
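
/*
 * Copy the firmware's 32-bit or 64-bit system table into the common
 * efi_systab representation, then sanity-check its signature and revision.
 * For a kexec boot, the fw_vendor, runtime and tables pointers come from
 * the efi_setup_data passed in by the previous kernel.
 */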
static int __init efi_systab_init(void *phys)
{
        if (efi_enabled(EFI_64BIT)) {
                efi_system_table_64_t *systab64;
                struct efi_setup_data *data = NULL;
                u64 tmp = 0;

                if (efi_setup) {
                        data = early_memremap(efi_setup, sizeof(*data));
                        if (!data)
                                return -ENOMEM;
                }
                systab64 = early_memremap((unsigned long)phys,
                                          sizeof(*systab64));
                if (systab64 == NULL) {
                        pr_err("Couldn't map the system table!\n");
                        if (data)
                                early_memunmap(data, sizeof(*data));
                        return -ENOMEM;
                }

                efi_systab.hdr = systab64->hdr;
                efi_systab.fw_vendor = data ? (unsigned long)data->fw_vendor :
                                              systab64->fw_vendor;
                tmp |= data ? data->fw_vendor : systab64->fw_vendor;
                efi_systab.fw_revision = systab64->fw_revision;
                efi_systab.con_in_handle = systab64->con_in_handle;
                tmp |= systab64->con_in_handle;
                efi_systab.con_in = systab64->con_in;
                tmp |= systab64->con_in;
                efi_systab.con_out_handle = systab64->con_out_handle;
                tmp |= systab64->con_out_handle;
                efi_systab.con_out = systab64->con_out;
                tmp |= systab64->con_out;
                efi_systab.stderr_handle = systab64->stderr_handle;
                tmp |= systab64->stderr_handle;
                efi_systab.stderr = systab64->stderr;
                tmp |= systab64->stderr;
                efi_systab.runtime = data ?
                                     (void *)(unsigned long)data->runtime :
                                     (void *)(unsigned long)systab64->runtime;
                tmp |= data ? data->runtime : systab64->runtime;
                efi_systab.boottime = (void *)(unsigned long)systab64->boottime;
                tmp |= systab64->boottime;
                efi_systab.nr_tables = systab64->nr_tables;
                efi_systab.tables = data ? (unsigned long)data->tables :
                                           systab64->tables;
                tmp |= data ? data->tables : systab64->tables;

                early_memunmap(systab64, sizeof(*systab64));
                if (data)
                        early_memunmap(data, sizeof(*data));
#ifdef CONFIG_X86_32
                if (tmp >> 32) {
                        pr_err("EFI data located above 4GB, disabling EFI.\n");
                        return -EINVAL;
                }
#endif
        } else {
                efi_system_table_32_t *systab32;

                systab32 = early_memremap((unsigned long)phys,
                                          sizeof(*systab32));
                if (systab32 == NULL) {
                        pr_err("Couldn't map the system table!\n");
                        return -ENOMEM;
                }

                efi_systab.hdr = systab32->hdr;
                efi_systab.fw_vendor = systab32->fw_vendor;
                efi_systab.fw_revision = systab32->fw_revision;
                efi_systab.con_in_handle = systab32->con_in_handle;
                efi_systab.con_in = systab32->con_in;
                efi_systab.con_out_handle = systab32->con_out_handle;
                efi_systab.con_out = systab32->con_out;
                efi_systab.stderr_handle = systab32->stderr_handle;
                efi_systab.stderr = systab32->stderr;
                efi_systab.runtime = (void *)(unsigned long)systab32->runtime;
                efi_systab.boottime = (void *)(unsigned long)systab32->boottime;
                efi_systab.nr_tables = systab32->nr_tables;
                efi_systab.tables = systab32->tables;

                early_memunmap(systab32, sizeof(*systab32));
        }

        efi.systab = &efi_systab;

        /*
         * Verify the EFI Table
         */
        if (efi.systab->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE) {
                pr_err("System table signature incorrect!\n");
                return -EINVAL;
        }
        if ((efi.systab->hdr.revision >> 16) == 0)
                pr_err("Warning: System table version %d.%02d, expected 1.00 or greater!\n",
                       efi.systab->hdr.revision >> 16,
                       efi.systab->hdr.revision & 0xffff);

        return 0;
}
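
/*
 * Stash the physical address of SetVirtualAddressMap() from the 32-bit
 * runtime services table. It is the only runtime service we need to call
 * before switching to virtual mode; everything else goes through the
 * virtual mapping.
 */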
static int __init efi_runtime_init32(void)
{
        efi_runtime_services_32_t *runtime;

        runtime = early_memremap((unsigned long)efi.systab->runtime,
                        sizeof(efi_runtime_services_32_t));
        if (!runtime) {
                pr_err("Could not map the runtime service table!\n");
                return -ENOMEM;
        }

        /*
         * We will only need *early* access to the SetVirtualAddressMap
         * EFI runtime service. All other runtime services will be called
         * via the virtual mapping.
         */
        efi_phys.set_virtual_address_map =
                        (efi_set_virtual_address_map_t *)
                        (unsigned long)runtime->set_virtual_address_map;
        early_memunmap(runtime, sizeof(efi_runtime_services_32_t));

        return 0;
}
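
/* Same as efi_runtime_init32(), but for the 64-bit runtime services table. */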
static int __init efi_runtime_init64(void)
{
        efi_runtime_services_64_t *runtime;

        runtime = early_memremap((unsigned long)efi.systab->runtime,
                        sizeof(efi_runtime_services_64_t));
        if (!runtime) {
                pr_err("Could not map the runtime service table!\n");
                return -ENOMEM;
        }

        /*
         * We will only need *early* access to the SetVirtualAddressMap
         * EFI runtime service. All other runtime services will be called
         * via the virtual mapping.
         */
        efi_phys.set_virtual_address_map =
                        (efi_set_virtual_address_map_t *)
                        (unsigned long)runtime->set_virtual_address_map;
        early_memunmap(runtime, sizeof(efi_runtime_services_64_t));

        return 0;
}

static int __init efi_runtime_init(void)
{
        int rv;

        /*
         * Check out the runtime services table. We need to map
         * the runtime services table so that we can grab the physical
         * address of several of the EFI runtime functions, needed to
         * set the firmware into virtual mode.
         *
         * When EFI_PARAVIRT is in force we cannot map the runtime
         * services memory region because we do not have direct access
         * to it. However, runtime services are still available through
         * proxy functions (e.g. the Xen dom0 EFI implementation issues
         * a special hypercall which executes the relevant EFI functions),
         * which is why they are always enabled in that case.
         */

        if (!efi_enabled(EFI_PARAVIRT)) {
                if (efi_enabled(EFI_64BIT))
                        rv = efi_runtime_init64();
                else
                        rv = efi_runtime_init32();

                if (rv)
                        return rv;
        }

        set_bit(EFI_RUNTIME_SERVICES, &efi.flags);

        return 0;
}
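
/* Map the EFI memory map itself so it can be walked during early boot. */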
static int __init efi_memmap_init(void)
{
        unsigned long addr, size;

        if (efi_enabled(EFI_PARAVIRT))
                return 0;

        /* Map the EFI memory map */
        size = efi.memmap.nr_map * efi.memmap.desc_size;
        addr = (unsigned long)efi.memmap.phys_map;

        efi.memmap.map = early_memremap(addr, size);
        if (efi.memmap.map == NULL) {
                pr_err("Could not map the memory map!\n");
                return -ENOMEM;
        }

        efi.memmap.map_end = efi.memmap.map + size;

        if (add_efi_memmap)
                do_add_efi_memmap();

        set_bit(EFI_MEMMAP, &efi.flags);

        return 0;
}
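
/*
 * Main early EFI setup: locate and validate the system table, report the
 * firmware vendor and revision, parse the configuration tables, and set
 * up runtime services and the memory map.
 */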
void __init efi_init(void)
{
        efi_char16_t *c16;
        char vendor[100] = "unknown";
        int i = 0;
        void *tmp;

#ifdef CONFIG_X86_32
        if (boot_params.efi_info.efi_systab_hi ||
            boot_params.efi_info.efi_memmap_hi) {
                pr_info("Table located above 4GB, disabling EFI.\n");
                return;
        }
        efi_phys.systab = (efi_system_table_t *)boot_params.efi_info.efi_systab;
#else
        efi_phys.systab = (efi_system_table_t *)
                          (boot_params.efi_info.efi_systab |
                          ((__u64)boot_params.efi_info.efi_systab_hi<<32));
#endif

        if (efi_systab_init(efi_phys.systab))
                return;

        efi.config_table = (unsigned long)efi.systab->tables;
        efi.fw_vendor = (unsigned long)efi.systab->fw_vendor;
        efi.runtime = (unsigned long)efi.systab->runtime;

        /*
         * Show what we know for posterity
         */
        c16 = tmp = early_memremap(efi.systab->fw_vendor, 2);
        if (c16) {
                for (i = 0; i < sizeof(vendor) - 1 && *c16; ++i)
                        vendor[i] = *c16++;
                vendor[i] = '\0';
        } else
                pr_err("Could not map the firmware vendor!\n");
        early_memunmap(tmp, 2);

        pr_info("EFI v%u.%.02u by %s\n",
                efi.systab->hdr.revision >> 16,
                efi.systab->hdr.revision & 0xffff, vendor);

        if (efi_reuse_config(efi.systab->tables, efi.systab->nr_tables))
                return;

        if (efi_config_init(arch_tables))
                return;

        /*
         * Note: We currently don't support runtime services on an EFI
         * that doesn't match the kernel 32/64-bit mode.
         */

        if (!efi_runtime_supported())
                pr_info("No EFI runtime due to 32/64-bit mismatch with kernel\n");
        else {
                if (efi_runtime_disabled() || efi_runtime_init())
                        return;
        }
        if (efi_memmap_init())
                return;

        if (efi_enabled(EFI_DBG))
                efi_print_memmap();

        efi_esrt_init();
}

void __init efi_late_init(void)
{
        efi_bgrt_init();
}

void __init efi_set_executable(efi_memory_desc_t *md, bool executable)
{
        u64 addr, npages;

        addr = md->virt_addr;
        npages = md->num_pages;

        memrange_efi_to_native(&addr, &npages);

        if (executable)
                set_memory_x(addr, npages);
        else
                set_memory_nx(addr, npages);
}

void __init runtime_code_page_mkexec(void)
{
        efi_memory_desc_t *md;

        /* Make EFI runtime service code area executable */
        for_each_efi_memory_desc(md) {
                if (md->type != EFI_RUNTIME_SERVICES_CODE)
                        continue;

                efi_set_executable(md, true);
        }
}

void __init efi_memory_uc(u64 addr, unsigned long size)
{
        unsigned long page_shift = 1UL << EFI_PAGE_SHIFT;
        u64 npages;

        npages = round_up(size, page_shift) / page_shift;
        memrange_efi_to_native(&addr, &npages);
        set_memory_uc(addr, npages);
}

void __init old_map_region(efi_memory_desc_t *md)
{
        u64 start_pfn, end_pfn, end;
        unsigned long size;
        void *va;

        start_pfn = PFN_DOWN(md->phys_addr);
        size = md->num_pages << PAGE_SHIFT;
        end = md->phys_addr + size;
        end_pfn = PFN_UP(end);

        if (pfn_range_is_mapped(start_pfn, end_pfn)) {
                va = __va(md->phys_addr);

                if (!(md->attribute & EFI_MEMORY_WB))
                        efi_memory_uc((u64)(unsigned long)va, size);
        } else
                va = efi_ioremap(md->phys_addr, size,
                                 md->type, md->attribute);

        md->virt_addr = (u64) (unsigned long) va;
        if (!va)
                pr_err("ioremap of 0x%llX failed!\n",
                       (unsigned long long)md->phys_addr);
}

/* Merge contiguous regions of the same type and attribute */
static void __init efi_merge_regions(void)
{
        efi_memory_desc_t *md, *prev_md = NULL;

        for_each_efi_memory_desc(md) {
                u64 prev_size;

                if (!prev_md) {
                        prev_md = md;
                        continue;
                }

                if (prev_md->type != md->type ||
                    prev_md->attribute != md->attribute) {
                        prev_md = md;
                        continue;
                }

                prev_size = prev_md->num_pages << EFI_PAGE_SHIFT;

                if (md->phys_addr == (prev_md->phys_addr + prev_size)) {
                        prev_md->num_pages += md->num_pages;
                        md->type = EFI_RESERVED_TYPE;
                        md->attribute = 0;
                        continue;
                }
                prev_md = md;
        }
}
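
/*
 * If this descriptor covers the physical system table, derive the table's
 * new virtual address from the region's phys-to-virt offset.
 */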
static void __init get_systab_virt_addr(efi_memory_desc_t *md)
{
        unsigned long size;
        u64 end, systab;

        size = md->num_pages << EFI_PAGE_SHIFT;
        end = md->phys_addr + size;
        systab = (u64)(unsigned long)efi_phys.systab;
        if (md->phys_addr <= systab && systab < end) {
                systab += md->virt_addr - md->phys_addr;
                efi.systab = (efi_system_table_t *)(unsigned long)systab;
        }
}
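
/*
 * Save the descriptors of the runtime regions so that a kexec'd kernel
 * can reuse the same virtual mappings (passed along via setup_data, see
 * kexec_enter_virtual_mode()).
 */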
static void __init save_runtime_map(void)
{
#ifdef CONFIG_KEXEC_CORE
        unsigned long desc_size;
        efi_memory_desc_t *md;
        void *tmp, *q = NULL;
        int count = 0;

        if (efi_enabled(EFI_OLD_MEMMAP))
                return;

        desc_size = efi.memmap.desc_size;

        for_each_efi_memory_desc(md) {
                if (!(md->attribute & EFI_MEMORY_RUNTIME) ||
                    (md->type == EFI_BOOT_SERVICES_CODE) ||
                    (md->type == EFI_BOOT_SERVICES_DATA))
                        continue;
                tmp = krealloc(q, (count + 1) * desc_size, GFP_KERNEL);
                if (!tmp)
                        goto out;
                q = tmp;

                memcpy(q + count * desc_size, md, desc_size);
                count++;
        }

        efi_runtime_map_setup(q, count, desc_size);
        return;

out:
        kfree(q);
        pr_err("Error saving runtime map, efi runtime on kexec non-functional!!\n");
#endif
}
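
/*
 * Grow the buffer holding the new memory map: allocate twice the previous
 * page order, copy the old contents over and free the old buffer.
 */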
static void *realloc_pages(void *old_memmap, int old_shift)
{
        void *ret;

        ret = (void *)__get_free_pages(GFP_KERNEL, old_shift + 1);
        if (!ret)
                goto out;

        /*
         * A first-time allocation doesn't have anything to copy.
         */
        if (!old_memmap)
                return ret;

        memcpy(ret, old_memmap, PAGE_SIZE << old_shift);

out:
        free_pages((unsigned long)old_memmap, old_shift);
        return ret;
}

/*
 * Iterate the EFI memory map in reverse order because the regions
 * will be mapped top-down. The end result is the same as if we had
 * mapped things forward, but doesn't require us to change the
 * existing implementation of efi_map_region().
 */
static inline void *efi_map_next_entry_reverse(void *entry)
{
        /* Initial call */
        if (!entry)
                return efi.memmap.map_end - efi.memmap.desc_size;

        entry -= efi.memmap.desc_size;
        if (entry < efi.memmap.map)
                return NULL;

        return entry;
}

/**
 * efi_map_next_entry - Return the next EFI memory map descriptor
 * @entry: Previous EFI memory map descriptor
 *
 * This is a helper function to iterate over the EFI memory map, which
 * we do in different orders depending on the current configuration.
 *
 * To begin traversing the memory map @entry must be %NULL.
 *
 * Returns %NULL when we reach the end of the memory map.
 */
static void *efi_map_next_entry(void *entry)
{
        if (!efi_enabled(EFI_OLD_MEMMAP) && efi_enabled(EFI_64BIT)) {
                /*
                 * Starting in UEFI v2.5 the EFI_PROPERTIES_TABLE
                 * config table feature requires us to map all entries
                 * in the same order as they appear in the EFI memory
                 * map. That is to say, entry N must have a lower
                 * virtual address than entry N+1. This is because the
                 * firmware toolchain leaves relative references in
                 * the code/data sections, which are split and become
                 * separate EFI memory regions. Mapping things
                 * out-of-order leads to the firmware accessing
                 * unmapped addresses.
                 *
                 * Since we need to map things this way whether or not
                 * the kernel actually makes use of
                 * EFI_PROPERTIES_TABLE, let's just switch to this
                 * scheme by default for 64-bit.
                 */
                return efi_map_next_entry_reverse(entry);
        }

        /* Initial call */
        if (!entry)
                return efi.memmap.map;

        entry += efi.memmap.desc_size;
        if (entry >= efi.memmap.map_end)
                return NULL;

        return entry;
}

/*
 * Map the efi memory ranges of the runtime services and update new_mmap with
 * virtual addresses.
 */
static void * __init efi_map_regions(int *count, int *pg_shift)
{
        void *p, *new_memmap = NULL;
        unsigned long left = 0;
        unsigned long desc_size;
        efi_memory_desc_t *md;

        desc_size = efi.memmap.desc_size;

        p = NULL;
        while ((p = efi_map_next_entry(p))) {
                md = p;
                if (!(md->attribute & EFI_MEMORY_RUNTIME)) {
#ifdef CONFIG_X86_64
                        if (md->type != EFI_BOOT_SERVICES_CODE &&
                            md->type != EFI_BOOT_SERVICES_DATA)
#endif
                                continue;
                }

                efi_map_region(md);
                get_systab_virt_addr(md);

                if (left < desc_size) {
                        new_memmap = realloc_pages(new_memmap, *pg_shift);
                        if (!new_memmap)
                                return NULL;

                        left += PAGE_SIZE << *pg_shift;
                        (*pg_shift)++;
                }

                memcpy(new_memmap + (*count * desc_size), md, desc_size);

                left -= desc_size;
                (*count)++;
        }

        return new_memmap;
}

static void __init kexec_enter_virtual_mode(void)
{
#ifdef CONFIG_KEXEC_CORE
        efi_memory_desc_t *md;
        unsigned int num_pages;

        efi.systab = NULL;

        /*
         * We don't do virtual mode, since we don't do runtime services, on
         * non-native EFI
         */
        if (!efi_is_native()) {
                efi_unmap_memmap();
                clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
                return;
        }

        if (efi_alloc_page_tables()) {
                pr_err("Failed to allocate EFI page tables\n");
                clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
                return;
        }

        /*
         * Map efi regions which were passed via setup_data. The virt_addr is a
         * fixed addr which was used in first kernel of a kexec boot.
         */
        for_each_efi_memory_desc(md) {
                efi_map_region_fixed(md); /* FIXME: add error handling */
                get_systab_virt_addr(md);
        }

        save_runtime_map();

        BUG_ON(!efi.systab);

        num_pages = ALIGN(efi.memmap.nr_map * efi.memmap.desc_size, PAGE_SIZE);
        num_pages >>= PAGE_SHIFT;

        if (efi_setup_page_tables(efi.memmap.phys_map, num_pages)) {
                clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
                return;
        }

        efi_sync_low_kernel_mappings();

        /*
         * Now that EFI is in virtual mode, update the function
         * pointers in the runtime service table to the new virtual addresses.
         *
         * Call EFI services through wrapper functions.
         */
        efi.runtime_version = efi_systab.hdr.revision;

        efi_native_runtime_setup();

        efi.set_virtual_address_map = NULL;

        if (efi_enabled(EFI_OLD_MEMMAP) && (__supported_pte_mask & _PAGE_NX))
                runtime_code_page_mkexec();

        /* clean DUMMY object */
        efi_delete_dummy_variable();
#endif
}

/*
 * This function will switch the EFI runtime services to virtual mode.
 * Essentially, we look through the EFI memmap and map every region that
 * has the runtime attribute bit set in its memory descriptor into the
 * efi_pgd page table.
 *
 * The old method, which used to update that memory descriptor with the
 * virtual address obtained from ioremap(), is still supported when the
 * kernel is booted with efi=old_map on its command line. That old
 * method also enabled the runtime services to be called without having
 * to thunk back into physical mode for every invocation.
 *
 * The new method does a pagetable switch in a preemption-safe manner
 * so that we're in a different address space when calling a runtime
 * function. For function argument passing we copy the PUDs of the
 * kernel page table into efi_pgd prior to each call.
 *
 * Specifically for a kexec boot, the EFI runtime maps of the previous
 * kernel should be passed in via setup_data. In that case the runtime
 * ranges will be mapped to the same virtual addresses as in the first
 * kernel, see kexec_enter_virtual_mode().
 */
static void __init __efi_enter_virtual_mode(void)
{
        int count = 0, pg_shift = 0;
        void *new_memmap = NULL;
        efi_status_t status;

        efi.systab = NULL;

        if (efi_alloc_page_tables()) {
                pr_err("Failed to allocate EFI page tables\n");
                clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
                return;
        }

        efi_merge_regions();
        new_memmap = efi_map_regions(&count, &pg_shift);
        if (!new_memmap) {
                pr_err("Error reallocating memory, EFI runtime non-functional!\n");
                clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
                return;
        }

        save_runtime_map();

        BUG_ON(!efi.systab);

        if (efi_setup_page_tables(__pa(new_memmap), 1 << pg_shift)) {
                clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
                return;
        }

        efi_sync_low_kernel_mappings();

        if (efi_is_native()) {
                status = phys_efi_set_virtual_address_map(
                                efi.memmap.desc_size * count,
                                efi.memmap.desc_size,
                                efi.memmap.desc_version,
                                (efi_memory_desc_t *)__pa(new_memmap));
        } else {
                status = efi_thunk_set_virtual_address_map(
                                efi_phys.set_virtual_address_map,
                                efi.memmap.desc_size * count,
                                efi.memmap.desc_size,
                                efi.memmap.desc_version,
                                (efi_memory_desc_t *)__pa(new_memmap));
        }

        if (status != EFI_SUCCESS) {
                pr_alert("Unable to switch EFI into virtual mode (status=%lx)!\n",
                         status);
                panic("EFI call to SetVirtualAddressMap() failed!");
        }

        /*
         * Now that EFI is in virtual mode, update the function
         * pointers in the runtime service table to the new virtual addresses.
         *
         * Call EFI services through wrapper functions.
         */
        efi.runtime_version = efi_systab.hdr.revision;

        if (efi_is_native())
                efi_native_runtime_setup();
        else
                efi_thunk_runtime_setup();

        efi.set_virtual_address_map = NULL;

        /*
         * Apply more restrictive page table mapping attributes now that
         * SVAM() has been called and the firmware has performed all
         * necessary relocation fixups for the new virtual addresses.
         */
        efi_runtime_update_mappings();
        efi_dump_pagetable();

        /*
         * We mapped the descriptor array into the EFI pagetable above
         * but we're not unmapping it here because if we're running in
         * EFI mixed mode we need all of memory to be accessible when
         * we pass parameters to the EFI runtime services in the
         * thunking code.
         *
         * efi_cleanup_page_tables(__pa(new_memmap), 1 << pg_shift);
         */
        free_pages((unsigned long)new_memmap, pg_shift);

        /* clean DUMMY object */
        efi_delete_dummy_variable();
}

void __init efi_enter_virtual_mode(void)
{
        if (efi_enabled(EFI_PARAVIRT))
                return;

        if (efi_setup)
                kexec_enter_virtual_mode();
        else
                __efi_enter_virtual_mode();
}

/*
 * Convenience functions to obtain memory types and attributes
 */
u32 efi_mem_type(unsigned long phys_addr)
{
        efi_memory_desc_t *md;

        if (!efi_enabled(EFI_MEMMAP))
                return 0;

        for_each_efi_memory_desc(md) {
                if ((md->phys_addr <= phys_addr) &&
                    (phys_addr < (md->phys_addr +
                                  (md->num_pages << EFI_PAGE_SHIFT))))
                        return md->type;
        }
        return 0;
}
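
/* Handle the arch-specific "efi=" kernel parameter (currently "old_map"). */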
static int __init arch_parse_efi_cmdline(char *str)
{
        if (!str) {
                pr_warn("need at least one option\n");
                return -EINVAL;
        }

        if (parse_option_str(str, "old_map"))
                set_bit(EFI_OLD_MEMMAP, &efi.flags);

        return 0;
}
early_param("efi", arch_parse_efi_cmdline);