Linux 5.7.6
arch/x86/mm/iomap_32.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright © 2008 Ingo Molnar
 */

#include <asm/iomap.h>
#include <asm/memtype.h>
#include <linux/export.h>
#include <linux/highmem.h>
static int is_io_mapping_possible(resource_size_t base, unsigned long size)
{
#if !defined(CONFIG_X86_PAE) && defined(CONFIG_PHYS_ADDR_T_64BIT)
	/* There is no way to map addresses above 1 << 32 without PAE */
	if (base + size > 0x100000000ULL)
		return 0;
#endif
	return 1;
}
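
/*
 * Worked example (illustrative, not part of the original file): on a
 * 32-bit non-PAE kernel where resource_size_t is 64-bit, a region at
 * base = 0xE0000000 with size = 0x40000000 gives
 * base + size = 0x120000000, which lies above the 4 GiB limit a
 * 32-bit PTE can address, so is_io_mapping_possible() returns 0 and
 * iomap_create_wc() below rejects it with -EINVAL.
 */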
int iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot)
{
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_WC;
	int ret;

	if (!is_io_mapping_possible(base, size))
		return -EINVAL;

	ret = memtype_reserve_io(base, base + size, &pcm);
	if (ret)
		return ret;

	*prot = __pgprot(__PAGE_KERNEL | cachemode2protval(pcm));
	/* Filter out unsupported __PAGE_KERNEL* bits: */
	pgprot_val(*prot) &= __default_kernel_pte_mask;

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_create_wc);
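
/*
 * Illustrative sketch (not part of the original file): how a caller
 * might pair iomap_create_wc() with iomap_free() below. The example
 * function name and values are hypothetical; the io_mapping helpers in
 * <linux/io-mapping.h> wrap essentially this sequence.
 */
#if 0
static int example_setup_wc(resource_size_t base, unsigned long size)
{
	pgprot_t prot;
	int ret;

	/* Reserve the range as write-combining and get a pgprot for it. */
	ret = iomap_create_wc(base, size, &prot);
	if (ret)
		return ret;	/* e.g. -EINVAL above 4 GiB without PAE */

	/* ... map individual pages with iomap_atomic_prot_pfn(pfn, prot) ... */

	/* Release the memtype reservation when the mapping is torn down. */
	iomap_free(base, size);
	return 0;
}
#endif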
void iomap_free(resource_size_t base, unsigned long size)
{
	memtype_free_io(base, base + size);
}
EXPORT_SYMBOL_GPL(iomap_free);
void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
{
	unsigned long vaddr;
	int idx, type;

	preempt_disable();
	pagefault_disable();

	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
	arch_flush_lazy_mmu_mode();

	return (void *)vaddr;
}
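
/*
 * Slot arithmetic, illustrated (figures assumed, not from the original
 * file): each CPU owns KM_TYPE_NR consecutive fixmap slots (20 in the
 * non-debug configuration). The second nested mapping on CPU 2 pushes
 * type == 1, so idx = 1 + 20 * 2 = 41; the PTE written is
 * kmap_pte - 41 and it backs __fix_to_virt(FIX_KMAP_BEGIN + 41).
 * Nesting depth is bounded by KM_TYPE_NR, which is why every call here
 * must be paired with iounmap_atomic() on the same CPU.
 */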
/*
 * Map 'pfn' using protections 'prot'
 */
void __iomem *
iomap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
{
	/*
	 * For non-PAT systems, translate non-WB request to UC- just in
	 * case the caller set the PWT bit to prot directly without using
	 * pgprot_writecombine(). UC- translates to uncached if the MTRR
	 * is UC or WC. UC- gets the real intention of the user, which is
	 * "WC if the MTRR is WC, UC if you can't do that."
	 */
	if (!pat_enabled() && pgprot2cachemode(prot) != _PAGE_CACHE_MODE_WB)
		prot = __pgprot(__PAGE_KERNEL |
				cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS));

	/* Filter out unsupported __PAGE_KERNEL* bits: */
	pgprot_val(prot) &= __default_kernel_pte_mask;

	return (void __force __iomem *) kmap_atomic_prot_pfn(pfn, prot);
}
EXPORT_SYMBOL_GPL(iomap_atomic_prot_pfn);
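
/*
 * Illustrative sketch (not part of the original file): filling one
 * page of a device aperture through an atomic WC mapping. The function
 * name, pfn and value are hypothetical; 'prot' would come from
 * iomap_create_wc() above.
 */
#if 0
static void example_fill_page(unsigned long pfn, pgprot_t prot, u32 val)
{
	u32 __iomem *p;
	int i;

	/* Disables preemption and pagefaults until the matching unmap. */
	p = iomap_atomic_prot_pfn(pfn, prot);

	for (i = 0; i < PAGE_SIZE / sizeof(u32); i++)
		writel(val, p + i);

	iounmap_atomic(p);
}
#endif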
void
iounmap_atomic(void __iomem *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;

	if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
	    vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
		int idx, type;

		type = kmap_atomic_idx();
		idx = type + KM_TYPE_NR * smp_processor_id();

#ifdef CONFIG_DEBUG_HIGHMEM
		WARN_ON_ONCE(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
#endif
		/*
		 * Force other mappings to Oops if they'll try to access this
		 * pte without first remapping it. Keeping stale mappings
		 * around is also a bad idea, in case the page changes
		 * cacheability attributes or becomes a protected page in a
		 * hypervisor.
		 */
		kpte_clear_flush(kmap_pte - idx, vaddr);
		kmap_atomic_idx_pop();
	}

	pagefault_enable();
	preempt_enable();
}
EXPORT_SYMBOL_GPL(iounmap_atomic);
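
/*
 * Note on the range check above (explanatory, not part of the original
 * file): fixmap addresses grow downward as the index grows, so
 * __fix_to_virt(FIX_KMAP_END) is the lowest kmap address and
 * __fix_to_virt(FIX_KMAP_BEGIN) the highest. A vaddr outside that
 * window (e.g. one that never came from kmap_atomic_prot_pfn()) skips
 * the PTE clear and only undoes the pagefault/preempt disables, which
 * mirrors the unconditional disables in kmap_atomic_prot_pfn().
 */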