// SPDX-License-Identifier: GPL-2.0-or-later

#include <linux/io.h>
#include <linux/slab.h>
#include <linux/mmzone.h>
#include <linux/vmalloc.h>
#include <asm/io-workarounds.h>

unsigned long ioremap_bot;
EXPORT_SYMBOL(ioremap_bot);
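
/*
 * Default ioremap(): map the range with cache-inhibited, guarded
 * protection (pgprot_noncached), routing through the I/O workaround
 * machinery when it is active.
 */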
void __iomem *ioremap(phys_addr_t addr, unsigned long size)
{
	pgprot_t prot = pgprot_noncached(PAGE_KERNEL);
	void *caller = __builtin_return_address(0);

	if (iowa_is_active())
		return iowa_ioremap(addr, size, prot, caller);
	return __ioremap_caller(addr, size, prot, caller);
}
EXPORT_SYMBOL(ioremap);
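
/* Write-combining variant: non-cached, but writes may be combined. */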
void __iomem *ioremap_wc(phys_addr_t addr, unsigned long size)
{
	pgprot_t prot = pgprot_noncached_wc(PAGE_KERNEL);
	void *caller = __builtin_return_address(0);

	if (iowa_is_active())
		return iowa_ioremap(addr, size, prot, caller);
	return __ioremap_caller(addr, size, prot, caller);
}
EXPORT_SYMBOL(ioremap_wc);
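
/* Cacheable mapping, kept coherent with the CPU caches. */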
void __iomem *ioremap_coherent(phys_addr_t addr, unsigned long size)
{
	pgprot_t prot = pgprot_cached(PAGE_KERNEL);
	void *caller = __builtin_return_address(0);

	if (iowa_is_active())
		return iowa_ioremap(addr, size, prot, caller);
	return __ioremap_caller(addr, size, prot, caller);
}
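
/*
 * ioremap_prot(): map with caller-supplied protection flags. The flags are
 * sanitized first: writable mappings are marked dirty, and user/exec
 * permissions are stripped so they cannot leak into a kernel mapping.
 */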
void __iomem *ioremap_prot(phys_addr_t addr, unsigned long size, unsigned long flags)
{
	pte_t pte = __pte(flags);
	void *caller = __builtin_return_address(0);

	/* writeable implies dirty for kernel addresses */
	if (pte_write(pte))
		pte = pte_mkdirty(pte);

	/* we don't want to let _PAGE_USER and _PAGE_EXEC leak out */
	pte = pte_exprotect(pte);
	pte = pte_mkprivileged(pte);

	if (iowa_is_active())
		return iowa_ioremap(addr, size, pte_pgprot(pte), caller);
	return __ioremap_caller(addr, size, pte_pgprot(pte), caller);
}
EXPORT_SYMBOL(ioremap_prot);
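
/*
 * Map a physically contiguous range at 'ea' one page at a time with
 * map_kernel_page(), for ioremaps performed early in boot before the
 * vmalloc/ioremap area allocator can be used.
 */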
int early_ioremap_range(unsigned long ea, phys_addr_t pa,
			unsigned long size, pgprot_t prot)
{
	unsigned long i;

	for (i = 0; i < size; i += PAGE_SIZE) {
		int err = map_kernel_page(ea + i, pa + i, prot);

		if (WARN_ON_ONCE(err))  /* Should clean up */
			return err;
	}

	return 0;
}
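
/*
 * Common helper for the ioremap implementations: reserve a virtual area in
 * the IOREMAP range, record the physical address, and map it with the given
 * protection. On failure, the partial mapping and the area are torn down.
 */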
void __iomem *do_ioremap(phys_addr_t pa, phys_addr_t offset, unsigned long size,
			 pgprot_t prot, void *caller)
{
	struct vm_struct *area;
	int ret;
	unsigned long va;

	area = __get_vm_area_caller(size, VM_IOREMAP, IOREMAP_START, IOREMAP_END, caller);
	if (area == NULL)
		return NULL;

	area->phys_addr = pa;
	va = (unsigned long)area->addr;

	ret = ioremap_page_range(va, va + size, pa, prot);
	if (!ret)
		return (void __iomem *)area->addr + offset;

	unmap_kernel_range(va, size);
	free_vm_area(area);

	return NULL;
}

#ifdef CONFIG_ZONE_DEVICE
/*
 * Override the generic version in mm/memremap.c.
 *
 * With hash translation, the direct-map range is mapped with just one
 * page size selected by htab_init_page_sizes(). Consult
 * mmu_psize_defs[] to determine the minimum page size alignment.
 */
unsigned long memremap_compat_align(void)
{
	unsigned int shift = mmu_psize_defs[mmu_linear_psize].shift;

	if (radix_enabled())
		return SUBSECTION_SIZE;
	return max(SUBSECTION_SIZE, 1UL << shift);
}
EXPORT_SYMBOL_GPL(memremap_compat_align);
#endif