arch/x86/platform/efi/efi.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Common EFI (Extensible Firmware Interface) support functions
 * Based on Extensible Firmware Interface Specification version 1.0
 *
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 * Copyright (C) 1999-2002 Hewlett-Packard Co.
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Stephane Eranian <eranian@hpl.hp.com>
 * Copyright (C) 2005-2008 Intel Co.
 *	Fenghua Yu <fenghua.yu@intel.com>
 *	Bibo Mao <bibo.mao@intel.com>
 *	Chandramouli Narayanan <mouli@linux.intel.com>
 *	Huang Ying <ying.huang@intel.com>
 * Copyright (C) 2013 SuSE Labs
 *	Borislav Petkov <bp@suse.de> - runtime services VA mapping
 *
 * Copied from efi_32.c to eliminate the duplicated code between EFI
 * 32/64 support code. --ying 2007-10-26
 *
 * All EFI Runtime Services are not implemented yet as EFI only
 * supports physical mode addressing on SoftSDV. This is to be fixed
 * in a future version. --drummond 1999-07-20
 *
 * Implemented EFI runtime services and virtual mode calls. --davidm
 *
 * Goutham Rao: <goutham.rao@intel.com>
 *	Skip non-WB memory and ignore empty memory ranges.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/efi.h>
#include <linux/efi-bgrt.h>
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/time.h>
#include <linux/io.h>
#include <linux/reboot.h>
#include <linux/bcd.h>

#include <asm/setup.h>
#include <asm/efi.h>
#include <asm/e820/api.h>
#include <asm/time.h>
#include <asm/set_memory.h>
#include <asm/tlbflush.h>
#include <asm/x86_init.h>
#include <asm/uv/uv.h>

static efi_system_table_t efi_systab __initdata;
static u64 efi_systab_phys __initdata;

static efi_config_table_type_t arch_tables[] __initdata = {
#ifdef CONFIG_X86_UV
	{UV_SYSTEM_TABLE_GUID, "UVsystab", &uv_systab_phys},
#endif
	{NULL_GUID, NULL, NULL},
};

static const unsigned long * const efi_tables[] = {
	&efi.mps,
	&efi.acpi,
	&efi.acpi20,
	&efi.smbios,
	&efi.smbios3,
	&efi.boot_info,
	&efi.hcdp,
	&efi.uga,
#ifdef CONFIG_X86_UV
	&uv_systab_phys,
#endif
	&efi.fw_vendor,
	&efi.runtime,
	&efi.config_table,
	&efi.esrt,
	&efi.properties_table,
	&efi.mem_attr_table,
#ifdef CONFIG_EFI_RCI2_TABLE
	&rci2_table_phys,
#endif
};

u64 efi_setup;		/* efi setup_data physical address */

static int add_efi_memmap __initdata;
static int __init setup_add_efi_memmap(char *arg)
{
	add_efi_memmap = 1;
	return 0;
}
early_param("add_efi_memmap", setup_add_efi_memmap);

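/*
 * Walk the EFI memory map, mark every range the firmware advertises as
 * EFI_MEMORY_MORE_RELIABLE as mirrored in memblock, and report how much
 * of the total memory is mirrored.
 */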
void __init efi_find_mirror(void)
{
	efi_memory_desc_t *md;
	u64 mirror_size = 0, total_size = 0;

	if (!efi_enabled(EFI_MEMMAP))
		return;

	for_each_efi_memory_desc(md) {
		unsigned long long start = md->phys_addr;
		unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;

		total_size += size;
		if (md->attribute & EFI_MEMORY_MORE_RELIABLE) {
			memblock_mark_mirror(start, size);
			mirror_size += size;
		}
	}
	if (mirror_size)
		pr_info("Memory: %lldM/%lldM mirrored memory\n",
			mirror_size>>20, total_size>>20);
}

/*
 * Tell the kernel about the EFI memory map.  This might include
 * more than the max 128 entries that can fit in the passed in e820
 * legacy (zeropage) memory map, but the kernel's e820 table can hold
 * E820_MAX_ENTRIES.
 */
static void __init do_add_efi_memmap(void)
{
	efi_memory_desc_t *md;

	if (!efi_enabled(EFI_MEMMAP))
		return;

	for_each_efi_memory_desc(md) {
		unsigned long long start = md->phys_addr;
		unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;
		int e820_type;

		switch (md->type) {
		case EFI_LOADER_CODE:
		case EFI_LOADER_DATA:
		case EFI_BOOT_SERVICES_CODE:
		case EFI_BOOT_SERVICES_DATA:
		case EFI_CONVENTIONAL_MEMORY:
			if (efi_soft_reserve_enabled()
			    && (md->attribute & EFI_MEMORY_SP))
				e820_type = E820_TYPE_SOFT_RESERVED;
			else if (md->attribute & EFI_MEMORY_WB)
				e820_type = E820_TYPE_RAM;
			else
				e820_type = E820_TYPE_RESERVED;
			break;
		case EFI_ACPI_RECLAIM_MEMORY:
			e820_type = E820_TYPE_ACPI;
			break;
		case EFI_ACPI_MEMORY_NVS:
			e820_type = E820_TYPE_NVS;
			break;
		case EFI_UNUSABLE_MEMORY:
			e820_type = E820_TYPE_UNUSABLE;
			break;
		case EFI_PERSISTENT_MEMORY:
			e820_type = E820_TYPE_PMEM;
			break;
		default:
			/*
			 * EFI_RESERVED_TYPE EFI_RUNTIME_SERVICES_CODE
			 * EFI_RUNTIME_SERVICES_DATA EFI_MEMORY_MAPPED_IO
			 * EFI_MEMORY_MAPPED_IO_PORT_SPACE EFI_PAL_CODE
			 */
			e820_type = E820_TYPE_RESERVED;
			break;
		}

		e820__range_add(start, size, e820_type);
	}
	e820__update_table(e820_table);
}

/*
 * Given add_efi_memmap defaults to 0 and there is no alternative
 * e820 mechanism for soft-reserved memory, import the full EFI memory
 * map if soft reservations are present and enabled. Otherwise, the
 * mechanism to disable the kernel's consideration of EFI_MEMORY_SP is
 * the efi=nosoftreserve option.
 */
static bool do_efi_soft_reserve(void)
{
	efi_memory_desc_t *md;

	if (!efi_enabled(EFI_MEMMAP))
		return false;

	if (!efi_soft_reserve_enabled())
		return false;

	for_each_efi_memory_desc(md)
		if (md->type == EFI_CONVENTIONAL_MEMORY &&
		    (md->attribute & EFI_MEMORY_SP))
			return true;
	return false;
}

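/*
 * Register the EFI memory map passed in boot_params.efi_info with the
 * early memmap code, optionally fold it into the e820 table (when
 * add_efi_memmap is set or soft reservations are in use), and reserve
 * the memory occupied by the map itself in memblock.
 */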
int __init efi_memblock_x86_reserve_range(void)
{
	struct efi_info *e = &boot_params.efi_info;
	struct efi_memory_map_data data;
	phys_addr_t pmap;
	int rv;

	if (efi_enabled(EFI_PARAVIRT))
		return 0;

#ifdef CONFIG_X86_32
	/* Can't handle data above 4GB at this time */
	if (e->efi_memmap_hi) {
		pr_err("Memory map is above 4GB, disabling EFI.\n");
		return -EINVAL;
	}
	pmap = e->efi_memmap;
#else
	pmap = (e->efi_memmap | ((__u64)e->efi_memmap_hi << 32));
#endif
	data.phys_map		= pmap;
	data.size		= e->efi_memmap_size;
	data.desc_size		= e->efi_memdesc_size;
	data.desc_version	= e->efi_memdesc_version;

	rv = efi_memmap_init_early(&data);
	if (rv)
		return rv;

	if (add_efi_memmap || do_efi_soft_reserve())
		do_add_efi_memmap();

	efi_fake_memmap_early();

	WARN(efi.memmap.desc_version != 1,
	     "Unexpected EFI_MEMORY_DESCRIPTOR version %ld",
	     efi.memmap.desc_version);

	memblock_reserve(pmap, efi.memmap.nr_map * efi.memmap.desc_size);

	return 0;
}

#define OVERFLOW_ADDR_SHIFT	(64 - EFI_PAGE_SHIFT)
#define OVERFLOW_ADDR_MASK	(U64_MAX << OVERFLOW_ADDR_SHIFT)
#define U64_HIGH_BIT		(~(U64_MAX >> 1))

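/*
 * Sanity-check a single EFI memory map entry: reject descriptors whose
 * page count is zero or whose page range would wrap around the 64-bit
 * address space, and warn about the offending entry.
 */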
static bool __init efi_memmap_entry_valid(const efi_memory_desc_t *md, int i)
{
	u64 end = (md->num_pages << EFI_PAGE_SHIFT) + md->phys_addr - 1;
	u64 end_hi = 0;
	char buf[64];

	if (md->num_pages == 0) {
		end = 0;
	} else if (md->num_pages > EFI_PAGES_MAX ||
		   EFI_PAGES_MAX - md->num_pages <
		   (md->phys_addr >> EFI_PAGE_SHIFT)) {
		end_hi = (md->num_pages & OVERFLOW_ADDR_MASK)
			>> OVERFLOW_ADDR_SHIFT;

		if ((md->phys_addr & U64_HIGH_BIT) && !(end & U64_HIGH_BIT))
			end_hi += 1;
	} else {
		return true;
	}

	pr_warn_once(FW_BUG "Invalid EFI memory map entries:\n");

	if (end_hi) {
		pr_warn("mem%02u: %s range=[0x%016llx-0x%llx%016llx] (invalid)\n",
			i, efi_md_typeattr_format(buf, sizeof(buf), md),
			md->phys_addr, end_hi, end);
	} else {
		pr_warn("mem%02u: %s range=[0x%016llx-0x%016llx] (invalid)\n",
			i, efi_md_typeattr_format(buf, sizeof(buf), md),
			md->phys_addr, end);
	}
	return false;
}

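/*
 * Compact the EFI memory map in place, dropping every entry that
 * efi_memmap_entry_valid() rejected, and install the shrunken map.
 */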
static void __init efi_clean_memmap(void)
{
	efi_memory_desc_t *out = efi.memmap.map;
	const efi_memory_desc_t *in = out;
	const efi_memory_desc_t *end = efi.memmap.map_end;
	int i, n_removal;

	for (i = n_removal = 0; in < end; i++) {
		if (efi_memmap_entry_valid(in, i)) {
			if (out != in)
				memcpy(out, in, efi.memmap.desc_size);
			out = (void *)out + efi.memmap.desc_size;
		} else {
			n_removal++;
		}
		in = (void *)in + efi.memmap.desc_size;
	}

	if (n_removal > 0) {
		struct efi_memory_map_data data = {
			.phys_map	= efi.memmap.phys_map,
			.desc_version	= efi.memmap.desc_version,
			.desc_size	= efi.memmap.desc_size,
			.size		= data.desc_size * (efi.memmap.nr_map - n_removal),
			.flags		= 0,
		};

		pr_warn("Removing %d invalid memory map entries.\n", n_removal);
		efi_memmap_install(&data);
	}
}

void __init efi_print_memmap(void)
{
	efi_memory_desc_t *md;
	int i = 0;

	for_each_efi_memory_desc(md) {
		char buf[64];

		pr_info("mem%02u: %s range=[0x%016llx-0x%016llx] (%lluMB)\n",
			i++, efi_md_typeattr_format(buf, sizeof(buf), md),
			md->phys_addr,
			md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - 1,
			(md->num_pages >> (20 - EFI_PAGE_SHIFT)));
	}
}

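/*
 * Map the firmware system table at @phys and copy it into the kernel's
 * efi_systab, converting from the 32-bit or 64-bit firmware layout as
 * needed; on 32-bit kernels, bail out if any referenced data sits above
 * 4GB. Finally verify the table signature and revision.
 */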
static int __init efi_systab_init(u64 phys)
{
	int size = efi_enabled(EFI_64BIT) ? sizeof(efi_system_table_64_t)
					  : sizeof(efi_system_table_32_t);
	bool over4g = false;
	void *p;

	p = early_memremap_ro(phys, size);
	if (p == NULL) {
		pr_err("Couldn't map the system table!\n");
		return -ENOMEM;
	}

	if (efi_enabled(EFI_64BIT)) {
		const efi_system_table_64_t *systab64 = p;

		efi_systab.hdr			= systab64->hdr;
		efi_systab.fw_vendor		= systab64->fw_vendor;
		efi_systab.fw_revision		= systab64->fw_revision;
		efi_systab.con_in_handle	= systab64->con_in_handle;
		efi_systab.con_in		= systab64->con_in;
		efi_systab.con_out_handle	= systab64->con_out_handle;
		efi_systab.con_out		= (void *)(unsigned long)systab64->con_out;
		efi_systab.stderr_handle	= systab64->stderr_handle;
		efi_systab.stderr		= systab64->stderr;
		efi_systab.runtime		= (void *)(unsigned long)systab64->runtime;
		efi_systab.boottime		= (void *)(unsigned long)systab64->boottime;
		efi_systab.nr_tables		= systab64->nr_tables;
		efi_systab.tables		= systab64->tables;

		over4g = systab64->con_in_handle	> U32_MAX ||
			 systab64->con_in		> U32_MAX ||
			 systab64->con_out_handle	> U32_MAX ||
			 systab64->con_out		> U32_MAX ||
			 systab64->stderr_handle	> U32_MAX ||
			 systab64->stderr		> U32_MAX ||
			 systab64->boottime		> U32_MAX;

		if (efi_setup) {
			struct efi_setup_data *data;

			data = early_memremap_ro(efi_setup, sizeof(*data));
			if (!data) {
				early_memunmap(p, size);
				return -ENOMEM;
			}

			efi_systab.fw_vendor	= (unsigned long)data->fw_vendor;
			efi_systab.runtime	= (void *)(unsigned long)data->runtime;
			efi_systab.tables	= (unsigned long)data->tables;

			over4g |= data->fw_vendor	> U32_MAX ||
				  data->runtime		> U32_MAX ||
				  data->tables		> U32_MAX;

			early_memunmap(data, sizeof(*data));
		} else {
			over4g |= systab64->fw_vendor	> U32_MAX ||
				  systab64->runtime	> U32_MAX ||
				  systab64->tables	> U32_MAX;
		}
	} else {
		const efi_system_table_32_t *systab32 = p;

		efi_systab.hdr			= systab32->hdr;
		efi_systab.fw_vendor		= systab32->fw_vendor;
		efi_systab.fw_revision		= systab32->fw_revision;
		efi_systab.con_in_handle	= systab32->con_in_handle;
		efi_systab.con_in		= systab32->con_in;
		efi_systab.con_out_handle	= systab32->con_out_handle;
		efi_systab.con_out		= (void *)(unsigned long)systab32->con_out;
		efi_systab.stderr_handle	= systab32->stderr_handle;
		efi_systab.stderr		= systab32->stderr;
		efi_systab.runtime		= (void *)(unsigned long)systab32->runtime;
		efi_systab.boottime		= (void *)(unsigned long)systab32->boottime;
		efi_systab.nr_tables		= systab32->nr_tables;
		efi_systab.tables		= systab32->tables;
	}

	early_memunmap(p, size);

	if (IS_ENABLED(CONFIG_X86_32) && over4g) {
		pr_err("EFI data located above 4GB, disabling EFI.\n");
		return -EINVAL;
	}

	efi.systab = &efi_systab;

	/*
	 * Verify the EFI Table
	 */
	if (efi.systab->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE) {
		pr_err("System table signature incorrect!\n");
		return -EINVAL;
	}
	if ((efi.systab->hdr.revision >> 16) == 0)
		pr_err("Warning: System table version %d.%02d, expected 1.00 or greater!\n",
		       efi.systab->hdr.revision >> 16,
		       efi.systab->hdr.revision & 0xffff);

	return 0;
}

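/*
 * Early EFI initialisation: map and validate the firmware's system table,
 * report the firmware vendor and revision, parse the configuration tables,
 * and decide whether runtime services can be used later on.
 */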
void __init efi_init(void)
{
	efi_char16_t *c16;
	char vendor[100] = "unknown";
	int i = 0;

	if (IS_ENABLED(CONFIG_X86_32) &&
	    (boot_params.efi_info.efi_systab_hi ||
	     boot_params.efi_info.efi_memmap_hi)) {
		pr_info("Table located above 4GB, disabling EFI.\n");
		return;
	}

	efi_systab_phys = boot_params.efi_info.efi_systab |
			  ((__u64)boot_params.efi_info.efi_systab_hi << 32);

	if (efi_systab_init(efi_systab_phys))
		return;

	efi.config_table = (unsigned long)efi.systab->tables;
	efi.fw_vendor	 = (unsigned long)efi.systab->fw_vendor;
	efi.runtime	 = (unsigned long)efi.systab->runtime;

	/*
	 * Show what we know for posterity
	 */
	c16 = early_memremap_ro(efi.systab->fw_vendor,
				sizeof(vendor) * sizeof(efi_char16_t));
	if (c16) {
		for (i = 0; i < sizeof(vendor) - 1 && c16[i]; ++i)
			vendor[i] = c16[i];
		vendor[i] = '\0';
		early_memunmap(c16, sizeof(vendor) * sizeof(efi_char16_t));
	} else {
		pr_err("Could not map the firmware vendor!\n");
	}

	pr_info("EFI v%u.%.02u by %s\n",
		efi.systab->hdr.revision >> 16,
		efi.systab->hdr.revision & 0xffff, vendor);

	if (efi_reuse_config(efi.systab->tables, efi.systab->nr_tables))
		return;

	if (efi_config_init(arch_tables))
		return;

	/*
	 * Note: We currently don't support runtime services on an EFI
	 * that doesn't match the kernel 32/64-bit mode.
	 */

	if (!efi_runtime_supported())
		pr_info("No EFI runtime due to 32/64-bit mismatch with kernel\n");

	if (!efi_runtime_supported() || efi_runtime_disabled()) {
		efi_memmap_unmap();
		return;
	}

	set_bit(EFI_RUNTIME_SERVICES, &efi.flags);
	efi_clean_memmap();

	if (efi_enabled(EFI_DBG))
		efi_print_memmap();
}

#if defined(CONFIG_X86_32) || defined(CONFIG_X86_UV)

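/*
 * The helpers below are only built for 32-bit kernels or CONFIG_X86_UV;
 * they adjust page protections (executable, uncached) on mapped EFI
 * ranges and provide the old direct-mapping/ioremap scheme.
 */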
void __init efi_set_executable(efi_memory_desc_t *md, bool executable)
{
	u64 addr, npages;

	addr = md->virt_addr;
	npages = md->num_pages;

	memrange_efi_to_native(&addr, &npages);

	if (executable)
		set_memory_x(addr, npages);
	else
		set_memory_nx(addr, npages);
}

void __init runtime_code_page_mkexec(void)
{
	efi_memory_desc_t *md;

	/* Make EFI runtime service code area executable */
	for_each_efi_memory_desc(md) {
		if (md->type != EFI_RUNTIME_SERVICES_CODE)
			continue;

		efi_set_executable(md, true);
	}
}

void __init efi_memory_uc(u64 addr, unsigned long size)
{
	unsigned long page_shift = 1UL << EFI_PAGE_SHIFT;
	u64 npages;

	npages = round_up(size, page_shift) / page_shift;
	memrange_efi_to_native(&addr, &npages);
	set_memory_uc(addr, npages);
}

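/*
 * Legacy region mapping: reuse the kernel's direct mapping when the
 * physical range is already mapped (switching it to uncached if the
 * descriptor lacks EFI_MEMORY_WB), otherwise fall back to efi_ioremap().
 */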
void __init old_map_region(efi_memory_desc_t *md)
{
	u64 start_pfn, end_pfn, end;
	unsigned long size;
	void *va;

	start_pfn = PFN_DOWN(md->phys_addr);
	size	  = md->num_pages << PAGE_SHIFT;
	end	  = md->phys_addr + size;
	end_pfn   = PFN_UP(end);

	if (pfn_range_is_mapped(start_pfn, end_pfn)) {
		va = __va(md->phys_addr);

		if (!(md->attribute & EFI_MEMORY_WB))
			efi_memory_uc((u64)(unsigned long)va, size);
	} else
		va = efi_ioremap(md->phys_addr, size,
				 md->type, md->attribute);

	md->virt_addr = (u64) (unsigned long) va;
	if (!va)
		pr_err("ioremap of 0x%llX failed!\n",
		       (unsigned long long)md->phys_addr);
}

#endif

/* Merge contiguous regions of the same type and attribute */
static void __init efi_merge_regions(void)
{
	efi_memory_desc_t *md, *prev_md = NULL;

	for_each_efi_memory_desc(md) {
		u64 prev_size;

		if (!prev_md) {
			prev_md = md;
			continue;
		}

		if (prev_md->type != md->type ||
		    prev_md->attribute != md->attribute) {
			prev_md = md;
			continue;
		}

		prev_size = prev_md->num_pages << EFI_PAGE_SHIFT;

		if (md->phys_addr == (prev_md->phys_addr + prev_size)) {
			prev_md->num_pages += md->num_pages;
			md->type = EFI_RESERVED_TYPE;
			md->attribute = 0;
			continue;
		}
		prev_md = md;
	}
}

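/*
 * If this descriptor covers the physical EFI system table, record the
 * table's new virtual address in efi.systab.
 */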
static void __init get_systab_virt_addr(efi_memory_desc_t *md)
{
	unsigned long size;
	u64 end, systab;

	size = md->num_pages << EFI_PAGE_SHIFT;
	end = md->phys_addr + size;
	systab = efi_systab_phys;
	if (md->phys_addr <= systab && systab < end) {
		systab += md->virt_addr - md->phys_addr;
		efi.systab = (efi_system_table_t *)(unsigned long)systab;
	}
}

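/*
 * Grow the scratch buffer used to build the new memory map: allocate a
 * block twice as large (order old_shift + 1), copy the old contents over
 * if there are any, and free the old pages. On allocation failure the old
 * buffer is freed and NULL is returned.
 */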
static void *realloc_pages(void *old_memmap, int old_shift)
{
	void *ret;

	ret = (void *)__get_free_pages(GFP_KERNEL, old_shift + 1);
	if (!ret)
		goto out;

	/*
	 * A first-time allocation doesn't have anything to copy.
	 */
	if (!old_memmap)
		return ret;

	memcpy(ret, old_memmap, PAGE_SIZE << old_shift);

out:
	free_pages((unsigned long)old_memmap, old_shift);
	return ret;
}

/*
 * Iterate the EFI memory map in reverse order because the regions
 * will be mapped top-down. The end result is the same as if we had
 * mapped things forward, but doesn't require us to change the
 * existing implementation of efi_map_region().
 */
static inline void *efi_map_next_entry_reverse(void *entry)
{
	/* Initial call */
	if (!entry)
		return efi.memmap.map_end - efi.memmap.desc_size;

	entry -= efi.memmap.desc_size;
	if (entry < efi.memmap.map)
		return NULL;

	return entry;
}

/*
 * efi_map_next_entry - Return the next EFI memory map descriptor
 * @entry: Previous EFI memory map descriptor
 *
 * This is a helper function to iterate over the EFI memory map, which
 * we do in different orders depending on the current configuration.
 *
 * To begin traversing the memory map @entry must be %NULL.
 *
 * Returns %NULL when we reach the end of the memory map.
 */
static void *efi_map_next_entry(void *entry)
{
	if (!efi_have_uv1_memmap() && efi_enabled(EFI_64BIT)) {
		/*
		 * Starting in UEFI v2.5 the EFI_PROPERTIES_TABLE
		 * config table feature requires us to map all entries
		 * in the same order as they appear in the EFI memory
		 * map. That is to say, entry N must have a lower
		 * virtual address than entry N+1. This is because the
		 * firmware toolchain leaves relative references in
		 * the code/data sections, which are split and become
		 * separate EFI memory regions. Mapping things
		 * out-of-order leads to the firmware accessing
		 * unmapped addresses.
		 *
		 * Since we need to map things this way whether or not
		 * the kernel actually makes use of
		 * EFI_PROPERTIES_TABLE, let's just switch to this
		 * scheme by default for 64-bit.
		 */
		return efi_map_next_entry_reverse(entry);
	}

	/* Initial call */
	if (!entry)
		return efi.memmap.map;

	entry += efi.memmap.desc_size;
	if (entry >= efi.memmap.map_end)
		return NULL;

	return entry;
}

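/*
 * Decide whether a descriptor needs a virtual mapping for runtime use:
 * runtime regions always do; 32-bit kernels map nothing else; mixed-mode
 * kernels also map conventional and loader memory so runtime call
 * arguments remain reachable through the 1:1 map; and boot services
 * regions are mapped to work around firmware that touches them when it
 * shouldn't.
 */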
static bool should_map_region(efi_memory_desc_t *md)
{
	/*
	 * Runtime regions always require runtime mappings (obviously).
	 */
	if (md->attribute & EFI_MEMORY_RUNTIME)
		return true;

	/*
	 * 32-bit EFI doesn't suffer from the bug that requires us to
	 * reserve boot services regions, and mixed mode support
	 * doesn't exist for 32-bit kernels.
	 */
	if (IS_ENABLED(CONFIG_X86_32))
		return false;

	/*
	 * EFI specific purpose memory may be reserved by default
	 * depending on kernel config and boot options.
	 */
	if (md->type == EFI_CONVENTIONAL_MEMORY &&
	    efi_soft_reserve_enabled() &&
	    (md->attribute & EFI_MEMORY_SP))
		return false;

	/*
	 * Map all of RAM so that we can access arguments in the 1:1
	 * mapping when making EFI runtime calls.
	 */
	if (efi_is_mixed()) {
		if (md->type == EFI_CONVENTIONAL_MEMORY ||
		    md->type == EFI_LOADER_DATA ||
		    md->type == EFI_LOADER_CODE)
			return true;
	}

	/*
	 * Map boot services regions as a workaround for buggy
	 * firmware that accesses them even when they shouldn't.
	 *
	 * See efi_{reserve,free}_boot_services().
	 */
	if (md->type == EFI_BOOT_SERVICES_CODE ||
	    md->type == EFI_BOOT_SERVICES_DATA)
		return true;

	return false;
}

/*
 * Map the efi memory ranges of the runtime services and update new_mmap with
 * virtual addresses.
 */
static void * __init efi_map_regions(int *count, int *pg_shift)
{
	void *p, *new_memmap = NULL;
	unsigned long left = 0;
	unsigned long desc_size;
	efi_memory_desc_t *md;

	desc_size = efi.memmap.desc_size;

	p = NULL;
	while ((p = efi_map_next_entry(p))) {
		md = p;

		if (!should_map_region(md))
			continue;

		efi_map_region(md);
		get_systab_virt_addr(md);

		if (left < desc_size) {
			new_memmap = realloc_pages(new_memmap, *pg_shift);
			if (!new_memmap)
				return NULL;

			left += PAGE_SIZE << *pg_shift;
			(*pg_shift)++;
		}

		memcpy(new_memmap + (*count * desc_size), md, desc_size);

		left -= desc_size;
		(*count)++;
	}

	return new_memmap;
}

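/*
 * kexec path: the firmware was already switched to virtual mode by the
 * first kernel, so re-install the fixed virtual mappings passed in via
 * setup_data instead of calling SetVirtualAddressMap() again.
 */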
static void __init kexec_enter_virtual_mode(void)
{
#ifdef CONFIG_KEXEC_CORE
	efi_memory_desc_t *md;
	unsigned int num_pages;

	efi.systab = NULL;

	/*
	 * We don't do virtual mode, since we don't do runtime services, on
	 * non-native EFI. With the UV1 memmap, we don't do runtime services in
	 * kexec kernel because in the initial boot something else might
	 * have been mapped at these virtual addresses.
	 */
	if (efi_is_mixed() || efi_have_uv1_memmap()) {
		efi_memmap_unmap();
		clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
		return;
	}

	if (efi_alloc_page_tables()) {
		pr_err("Failed to allocate EFI page tables\n");
		clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
		return;
	}

	/*
	 * Map efi regions which were passed via setup_data. The virt_addr is a
	 * fixed addr which was used in first kernel of a kexec boot.
	 */
	for_each_efi_memory_desc(md) {
		efi_map_region_fixed(md); /* FIXME: add error handling */
		get_systab_virt_addr(md);
	}

	/*
	 * Unregister the early EFI memmap from efi_init() and install
	 * the new EFI memory map.
	 */
	efi_memmap_unmap();

	if (efi_memmap_init_late(efi.memmap.phys_map,
				 efi.memmap.desc_size * efi.memmap.nr_map)) {
		pr_err("Failed to remap late EFI memory map\n");
		clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
		return;
	}

	BUG_ON(!efi.systab);

	num_pages = ALIGN(efi.memmap.nr_map * efi.memmap.desc_size, PAGE_SIZE);
	num_pages >>= PAGE_SHIFT;

	if (efi_setup_page_tables(efi.memmap.phys_map, num_pages)) {
		clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
		return;
	}

	efi_sync_low_kernel_mappings();

	/*
	 * Now that EFI is in virtual mode, update the function
	 * pointers in the runtime service table to the new virtual addresses.
	 *
	 * Call EFI services through wrapper functions.
	 */
	efi.runtime_version = efi_systab.hdr.revision;

	efi_native_runtime_setup();
#endif
}

/*
 * This function will switch the EFI runtime services to virtual mode.
 * Essentially, we look through the EFI memmap and map every region that
 * has the runtime attribute bit set in its memory descriptor into the
 * efi_pgd page table.
 *
 * The old method which used to update that memory descriptor with the
 * virtual address obtained from ioremap() is still supported when the
 * kernel is booted on SG1 UV1 hardware. Same old method enabled the
 * runtime services to be called without having to thunk back into
 * physical mode for every invocation.
 *
 * The new method does a pagetable switch in a preemption-safe manner
 * so that we're in a different address space when calling a runtime
 * function. For function arguments passing we do copy the PUDs of the
 * kernel page table into efi_pgd prior to each call.
 *
 * Specially for kexec boot, efi runtime maps in previous kernel should
 * be passed in via setup_data. In that case runtime ranges will be mapped
 * to the same virtual addresses as the first kernel, see
 * kexec_enter_virtual_mode().
 */
static void __init __efi_enter_virtual_mode(void)
{
	int count = 0, pg_shift = 0;
	void *new_memmap = NULL;
	efi_status_t status;
	unsigned long pa;

	efi.systab = NULL;

	if (efi_alloc_page_tables()) {
		pr_err("Failed to allocate EFI page tables\n");
		goto err;
	}

	efi_merge_regions();
	new_memmap = efi_map_regions(&count, &pg_shift);
	if (!new_memmap) {
		pr_err("Error reallocating memory, EFI runtime non-functional!\n");
		goto err;
	}

	pa = __pa(new_memmap);

	/*
	 * Unregister the early EFI memmap from efi_init() and install
	 * the new EFI memory map that we are about to pass to the
	 * firmware via SetVirtualAddressMap().
	 */
	efi_memmap_unmap();

	if (efi_memmap_init_late(pa, efi.memmap.desc_size * count)) {
		pr_err("Failed to remap late EFI memory map\n");
		goto err;
	}

	if (efi_enabled(EFI_DBG)) {
		pr_info("EFI runtime memory map:\n");
		efi_print_memmap();
	}

	if (WARN_ON(!efi.systab))
		goto err;

	if (efi_setup_page_tables(pa, 1 << pg_shift))
		goto err;

	efi_sync_low_kernel_mappings();

	status = efi_set_virtual_address_map(efi.memmap.desc_size * count,
					     efi.memmap.desc_size,
					     efi.memmap.desc_version,
					     (efi_memory_desc_t *)pa);
	if (status != EFI_SUCCESS) {
		pr_err("Unable to switch EFI into virtual mode (status=%lx)!\n",
		       status);
		goto err;
	}

	efi_free_boot_services();

	/*
	 * Now that EFI is in virtual mode, update the function
	 * pointers in the runtime service table to the new virtual addresses.
	 *
	 * Call EFI services through wrapper functions.
	 */
	efi.runtime_version = efi_systab.hdr.revision;

	if (!efi_is_mixed())
		efi_native_runtime_setup();
	else
		efi_thunk_runtime_setup();

	/*
	 * Apply more restrictive page table mapping attributes now that
	 * SVAM() has been called and the firmware has performed all
	 * necessary relocation fixups for the new virtual addresses.
	 */
	efi_runtime_update_mappings();

	/* clean DUMMY object */
	efi_delete_dummy_variable();
	return;

err:
	clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
}

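/*
 * Switch the EFI runtime services to virtual mode: take the kexec
 * fixed-mapping path when EFI setup_data was provided, otherwise the
 * regular SetVirtualAddressMap() path, then dump the resulting EFI page
 * tables via efi_dump_pagetable().
 */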
void __init efi_enter_virtual_mode(void)
{
	if (efi_enabled(EFI_PARAVIRT))
		return;

	if (efi_setup)
		kexec_enter_virtual_mode();
	else
		__efi_enter_virtual_mode();

	efi_dump_pagetable();
}

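/*
 * Return true if @phys_addr matches the physical address of one of the
 * EFI-provided tables listed in efi_tables[].
 */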
bool efi_is_table_address(unsigned long phys_addr)
{
	unsigned int i;

	if (phys_addr == EFI_INVALID_TABLE_ADDR)
		return false;

	for (i = 0; i < ARRAY_SIZE(efi_tables); i++)
		if (*(efi_tables[i]) == phys_addr)
			return true;

	return false;
}