arch/x86/platform/uv/bios_uv.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * BIOS run time interface routines.
 *
 * Copyright (c) 2008-2009 Silicon Graphics, Inc. All Rights Reserved.
 * Copyright (c) Russ Anderson <rja@sgi.com>
 */

#include <linux/efi.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <asm/efi.h>
#include <linux/io.h>
#include <asm/uv/bios.h>
#include <asm/uv/uv_hub.h>

unsigned long uv_systab_phys __ro_after_init = EFI_INVALID_TABLE_ADDR;

struct uv_systab *uv_systab;

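/*
 * Common entry point for firmware calls: both uv_bios_call() and
 * uv_bios_call_irqsave() funnel through here with __efi_uv_runtime_lock held.
 */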
static s64 __uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3,
                          u64 a4, u64 a5)
{
        struct uv_systab *tab = uv_systab;
        s64 ret;

        if (!tab || !tab->function)
                /*
                 * BIOS does not support UV systab
                 */
                return BIOS_STATUS_UNIMPLEMENTED;

        /*
         * If EFI_UV1_MEMMAP is set, we need to fall back to using our old EFI
         * callback method, which uses efi_call() directly, with the kernel
         * page tables:
         */
        if (unlikely(efi_enabled(EFI_UV1_MEMMAP))) {
                kernel_fpu_begin();
                ret = efi_call((void *)__va(tab->function), (u64)which,
                               a1, a2, a3, a4, a5);
                kernel_fpu_end();
        } else {
                ret = efi_call_virt_pointer(tab, function, (u64)which,
                                            a1, a2, a3, a4, a5);
        }

        return ret;
}

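/*
 * uv_bios_call() serializes UV BIOS calls by taking __efi_uv_runtime_lock
 * interruptibly; if the caller is interrupted while waiting for the lock,
 * the call is aborted with BIOS_STATUS_ABORT.
 */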
s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5)
{
        s64 ret;

        if (down_interruptible(&__efi_uv_runtime_lock))
                return BIOS_STATUS_ABORT;

        ret = __uv_bios_call(which, a1, a2, a3, a4, a5);
        up(&__efi_uv_runtime_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(uv_bios_call);

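/*
 * Same as uv_bios_call(), but with local interrupts disabled around the
 * actual firmware call.
 */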
s64 uv_bios_call_irqsave(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3,
                         u64 a4, u64 a5)
{
        unsigned long bios_flags;
        s64 ret;

        if (down_interruptible(&__efi_uv_runtime_lock))
                return BIOS_STATUS_ABORT;

        local_irq_save(bios_flags);
        ret = __uv_bios_call(which, a1, a2, a3, a4, a5);
        local_irq_restore(bios_flags);

        up(&__efi_uv_runtime_lock);

        return ret;
}

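/* Partition and system identification values, exported for other UV code. */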
long sn_partition_id;
EXPORT_SYMBOL_GPL(sn_partition_id);
long sn_coherency_id;
EXPORT_SYMBOL_GPL(sn_coherency_id);
long sn_region_size;
EXPORT_SYMBOL_GPL(sn_region_size);
long system_serial_number;
EXPORT_SYMBOL_GPL(system_serial_number);
int uv_type;
EXPORT_SYMBOL_GPL(uv_type);

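/*
 * Query partition info from the BIOS: the first returned word is decoded via
 * union partition_info_u, the second holds the system serial number. Any of
 * the output pointers may be NULL if the caller is not interested.
 */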
s64 uv_bios_get_sn_info(int fc, int *uvtype, long *partid, long *coher,
                        long *region, long *ssn)
{
        s64 ret;
        u64 v0, v1;
        union partition_info_u part;

        ret = uv_bios_call_irqsave(UV_BIOS_GET_SN_INFO, fc,
                                   (u64)(&v0), (u64)(&v1), 0, 0);
        if (ret != BIOS_STATUS_SUCCESS)
                return ret;

        part.val = v0;
        if (uvtype)
                *uvtype = part.hub_version;
        if (partid)
                *partid = part.partition_id;
        if (coher)
                *coher = part.coherence_id;
        if (region)
                *region = part.region_size;
        if (ssn)
                *ssn = v1;
        return ret;
}
EXPORT_SYMBOL_GPL(uv_bios_get_sn_info);

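/*
 * Message-queue watchlist helpers: the BIOS hands back a watchlist number
 * (or a negative error code) for the queue at @addr and fills in
 * @intr_mmr_offset.
 */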
int
uv_bios_mq_watchlist_alloc(unsigned long addr, unsigned int mq_size,
                           unsigned long *intr_mmr_offset)
{
        u64 watchlist;
        s64 ret;

        /*
         * bios returns watchlist number or negative error number.
         */
        ret = (int)uv_bios_call_irqsave(UV_BIOS_WATCHLIST_ALLOC, addr,
                                        mq_size, (u64)intr_mmr_offset,
                                        (u64)&watchlist, 0);
        if (ret < BIOS_STATUS_SUCCESS)
                return ret;

        return watchlist;
}
EXPORT_SYMBOL_GPL(uv_bios_mq_watchlist_alloc);

int
uv_bios_mq_watchlist_free(int blade, int watchlist_num)
{
        return (int)uv_bios_call_irqsave(UV_BIOS_WATCHLIST_FREE,
                                         blade, watchlist_num, 0, 0, 0);
}
EXPORT_SYMBOL_GPL(uv_bios_mq_watchlist_free);

s64
uv_bios_change_memprotect(u64 paddr, u64 len, enum uv_memprotect perms)
{
        return uv_bios_call_irqsave(UV_BIOS_MEMPROTECT, paddr, len,
                                    perms, 0, 0);
}
EXPORT_SYMBOL_GPL(uv_bios_change_memprotect);

s64
uv_bios_reserved_page_pa(u64 buf, u64 *cookie, u64 *addr, u64 *len)
{
        return uv_bios_call_irqsave(UV_BIOS_GET_PARTITION_ADDR, (u64)cookie,
                                    (u64)addr, buf, (u64)len, 0);
}
EXPORT_SYMBOL_GPL(uv_bios_reserved_page_pa);

s64 uv_bios_freq_base(u64 clock_type, u64 *ticks_per_second)
{
        return uv_bios_call(UV_BIOS_FREQ_BASE, clock_type,
                            (u64)ticks_per_second, 0, 0, 0);
}
EXPORT_SYMBOL_GPL(uv_bios_freq_base);

/*
 * uv_bios_set_legacy_vga_target - Set Legacy VGA I/O Target
 * @decode: true to enable target, false to disable target
 * @domain: PCI domain number
 * @bus: PCI bus number
 *
 * Returns:
 *    0: Success
 *    -EINVAL: Invalid domain or bus number
 *    -ENOSYS: Capability not available
 *    -EBUSY: Legacy VGA I/O cannot be retargeted at this time
 */
int uv_bios_set_legacy_vga_target(bool decode, int domain, int bus)
{
        return uv_bios_call(UV_BIOS_SET_LEGACY_VGA_TARGET,
                            (u64)decode, (u64)domain, (u64)bus, 0, 0);
}
EXPORT_SYMBOL_GPL(uv_bios_set_legacy_vga_target);

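/*
 * Map the UV system table found at uv_systab_phys, validate its signature,
 * and, for UV4 and later where the table size is variable, remap it at its
 * full advertised size.
 */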
int uv_bios_init(void)
{
        uv_systab = NULL;
        if ((uv_systab_phys == EFI_INVALID_TABLE_ADDR) ||
            !uv_systab_phys || efi_runtime_disabled()) {
                pr_crit("UV: UVsystab: missing\n");
                return -EEXIST;
        }

        uv_systab = ioremap(uv_systab_phys, sizeof(struct uv_systab));
        if (!uv_systab || strncmp(uv_systab->signature, UV_SYSTAB_SIG, 4)) {
                pr_err("UV: UVsystab: bad signature!\n");
                iounmap(uv_systab);
                return -EINVAL;
        }

        /* Starting with UV4 the UV systab size is variable */
        if (uv_systab->revision >= UV_SYSTAB_VERSION_UV4) {
                int size = uv_systab->size;

                iounmap(uv_systab);
                uv_systab = ioremap(uv_systab_phys, size);
                if (!uv_systab) {
                        pr_err("UV: UVsystab: ioremap(%d) failed!\n", size);
                        return -EFAULT;
                }
        }
        pr_info("UV: UVsystab: Revision:%x\n", uv_systab->revision);
        return 0;
}

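/*
 * Make the EFI runtime and boot services code regions (non-)executable;
 * a no-op when the CPU does not support NX.
 */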
static void __init early_code_mapping_set_exec(int executable)
{
        efi_memory_desc_t *md;

        if (!(__supported_pte_mask & _PAGE_NX))
                return;

        /* Make EFI service code area executable */
        for_each_efi_memory_desc(md) {
                if (md->type == EFI_RUNTIME_SERVICES_CODE ||
                    md->type == EFI_BOOT_SERVICES_CODE)
                        efi_set_executable(md, executable);
        }
}

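/*
 * Tear down the temporary 1:1 mapping set up by efi_uv1_memmap_phys_prolog():
 * restore the saved PGD entries, free the page tables allocated for the
 * identity mapping, and flush the TLB.
 */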
void __init efi_uv1_memmap_phys_epilog(pgd_t *save_pgd)
{
        /*
         * After the lock is released, the original page table is restored.
         */
        int pgd_idx, i;
        int nr_pgds;
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;

        nr_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE);

        for (pgd_idx = 0; pgd_idx < nr_pgds; pgd_idx++) {
                pgd = pgd_offset_k(pgd_idx * PGDIR_SIZE);
                set_pgd(pgd_offset_k(pgd_idx * PGDIR_SIZE), save_pgd[pgd_idx]);

                if (!pgd_present(*pgd))
                        continue;

                for (i = 0; i < PTRS_PER_P4D; i++) {
                        p4d = p4d_offset(pgd,
                                         pgd_idx * PGDIR_SIZE + i * P4D_SIZE);

                        if (!p4d_present(*p4d))
                                continue;

                        pud = (pud_t *)p4d_page_vaddr(*p4d);
                        pud_free(&init_mm, pud);
                }

                p4d = (p4d_t *)pgd_page_vaddr(*pgd);
                p4d_free(&init_mm, p4d);
        }

        kfree(save_pgd);

        __flush_tlb_all();
        early_code_mapping_set_exec(0);
}

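/*
 * Build a temporary 1:1 identity mapping of all physical memory for the UV1
 * memmap path, saving the kernel's original PGD entries so that
 * efi_uv1_memmap_phys_epilog() can restore them afterwards.
 */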
pgd_t * __init efi_uv1_memmap_phys_prolog(void)
{
        unsigned long vaddr, addr_pgd, addr_p4d, addr_pud;
        pgd_t *save_pgd, *pgd_k, *pgd_efi;
        p4d_t *p4d, *p4d_k, *p4d_efi;
        pud_t *pud;

        int pgd;
        int n_pgds, i, j;

        early_code_mapping_set_exec(1);

        n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE);
        save_pgd = kmalloc_array(n_pgds, sizeof(*save_pgd), GFP_KERNEL);
        if (!save_pgd)
                return NULL;

        /*
         * Build a 1:1 identity mapping for UV1 memmap usage. Note that
         * PAGE_OFFSET is PGDIR_SIZE aligned when KASLR is disabled, while it
         * is PUD_SIZE aligned with KASLR enabled. So for a given physical
         * address X, pud_index(X) != pud_index(__va(X)); we can only copy the
         * PUD entry of __va(X) into the PUD entry of X to build the 1:1
         * mapping. This means we can only reuse the PMD tables of the direct
         * mapping here.
         */
        for (pgd = 0; pgd < n_pgds; pgd++) {
                addr_pgd = (unsigned long)(pgd * PGDIR_SIZE);
                vaddr = (unsigned long)__va(pgd * PGDIR_SIZE);
                pgd_efi = pgd_offset_k(addr_pgd);
                save_pgd[pgd] = *pgd_efi;

                p4d = p4d_alloc(&init_mm, pgd_efi, addr_pgd);
                if (!p4d) {
                        pr_err("Failed to allocate p4d table!\n");
                        goto out;
                }

                for (i = 0; i < PTRS_PER_P4D; i++) {
                        addr_p4d = addr_pgd + i * P4D_SIZE;
                        p4d_efi = p4d + p4d_index(addr_p4d);

                        pud = pud_alloc(&init_mm, p4d_efi, addr_p4d);
                        if (!pud) {
                                pr_err("Failed to allocate pud table!\n");
                                goto out;
                        }

                        for (j = 0; j < PTRS_PER_PUD; j++) {
                                addr_pud = addr_p4d + j * PUD_SIZE;

                                if (addr_pud > (max_pfn << PAGE_SHIFT))
                                        break;

                                vaddr = (unsigned long)__va(addr_pud);

                                pgd_k = pgd_offset_k(vaddr);
                                p4d_k = p4d_offset(pgd_k, vaddr);
                                pud[j] = *pud_offset(p4d_k, vaddr);
                        }
                }
                pgd_offset_k(pgd * PGDIR_SIZE)->pgd &= ~_PAGE_NX;
        }

        __flush_tlb_all();
        return save_pgd;
out:
        efi_uv1_memmap_phys_epilog(save_pgd);
        return NULL;
}

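/*
 * Map an EFI region for the UV1 memmap path: MMIO regions get a normal
 * ioremap(), while RAM-backed regions are added to the kernel's direct
 * mapping and returned via __va(), marked uncached when the region does not
 * advertise write-back caching.
 */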
void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size,
                                 u32 type, u64 attribute)
{
        unsigned long last_map_pfn;

        if (type == EFI_MEMORY_MAPPED_IO)
                return ioremap(phys_addr, size);

        last_map_pfn = init_memory_mapping(phys_addr, phys_addr + size);
        if ((last_map_pfn << PAGE_SHIFT) < phys_addr + size) {
                unsigned long top = last_map_pfn << PAGE_SHIFT;

                efi_ioremap(top, size - (top - phys_addr), type, attribute);
        }

        if (!(attribute & EFI_MEMORY_WB))
                efi_memory_uc((u64)(unsigned long)__va(phys_addr), size);

        return (void __iomem *)__va(phys_addr);
}

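/*
 * "efi=old_map" on the kernel command line selects the UV1 memmap (old EFI
 * callback) method, unless the kernel is running in EFI mixed mode.
 */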
static int __init arch_parse_efi_cmdline(char *str)
{
        if (!str) {
                pr_warn("need at least one option\n");
                return -EINVAL;
        }

        if (!efi_is_mixed() && parse_option_str(str, "old_map"))
                set_bit(EFI_UV1_MEMMAP, &efi.flags);

        return 0;
}
early_param("efi", arch_parse_efi_cmdline);