// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/arch/unicore32/mm/ioremap.c
 *
 * Code specific to PKUnity SoC and UniCore ISA
 *
 * Copyright (C) 2001-2010 GUAN Xue-tao
 *
 * Re-map IO memory to kernel address space so that we can access it.
 *
 * This allows a driver to remap an arbitrary region of bus memory into
 * virtual space. One should *only* use readl, writel, memcpy_toio and
 * so on with such remapped areas.
 *
 * Because UniCore only has a 32-bit address space we can't address the
 * whole of the (physical) PCI space at once. PCI huge-mode addressing
 * allows us to circumvent this restriction by splitting PCI space into
 * two 2GB chunks and mapping only one at a time into processor memory.
 * We use MMU protection domains to trap any attempt to access the bank
 * that is not currently mapped. (This isn't fully implemented yet.)
 */
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/io.h>

#include <asm/cputype.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <linux/sizes.h>

#include <mach/map.h>
#include "mm.h"
/*
 * Used by ioremap() and iounmap() code to mark (super)section-mapped
 * I/O regions in vm_struct->flags field.
 */
#define VM_UNICORE_SECTION_MAPPING	0x80000000
int ioremap_page(unsigned long virt, unsigned long phys,
		 const struct mem_type *mtype)
{
	return ioremap_page_range(virt, virt + PAGE_SIZE, phys,
				  __pgprot(mtype->prot_pte));
}
EXPORT_SYMBOL(ioremap_page);
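
/*
 * Illustrative sketch (not from this file; both addresses below are
 * hypothetical placeholders): ioremap_page() wires up exactly one page at a
 * fixed kernel virtual address, so early platform setup code could do:
 *
 *	if (ioremap_page(0xfff00000UL, 0xee000000UL,
 *			 get_mem_type(MT_DEVICE)))
 *		pr_err("failed to map device page\n");
 */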
/*
 * Section support is unsafe on SMP - If you iounmap and ioremap a region,
 * the other CPUs will not see this change until their next context switch.
 * Meanwhile, (eg) if an interrupt comes in on one of those other CPUs
 * which requires the new ioremap'd region to be referenced, the CPU will
 * reference the _old_ region.
 *
 * Note that get_vm_area_caller() allocates a guard 4K page, so we need to
 * mask the size back to 4MB aligned or we will overflow in the loop below.
 */
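/*
 * For example, a single 4 MiB section mapping gets a vm area of
 * SZ_4M + PAGE_SIZE bytes (section plus guard page); masking with
 * ~(SZ_4M - 1) brings that back down to SZ_4M, so the loop below visits
 * exactly one section rather than stepping past the end of the area.
 */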
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
	unsigned long addr = virt, end = virt + (size & ~(SZ_4M - 1));
	pgd_t *pgd;

	flush_cache_vunmap(addr, end);
	pgd = pgd_offset_k(addr);
	do {
		pmd_t pmd, *pmdp = pmd_offset((pud_t *)pgd, addr);

		pmd = *pmdp;
		if (!pmd_none(pmd)) {
			/*
			 * Clear the PMD from the page table, and
			 * increment the kvm sequence so others
			 * notice this change.
			 *
			 * Note: this is still racy on SMP machines.
			 */
			pmd_clear(pmdp);

			/*
			 * Free the page table, if there was one.
			 */
			if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
				pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
		}

		addr += PGDIR_SIZE;
		pgd++;
	} while (addr < end);

	flush_tlb_kernel_range(virt, end);
}
static int
remap_area_sections(unsigned long virt, unsigned long pfn,
		    size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pgd_t *pgd;

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	pgd = pgd_offset_k(addr);
	do {
		pmd_t *pmd = pmd_offset((pud_t *)pgd, addr);

		set_pmd(pmd, __pmd(__pfn_to_phys(pfn) | type->prot_sect));
		pfn += SZ_4M >> PAGE_SHIFT;
		flush_pmd_entry(pmd);

		addr += PGDIR_SIZE;
		pgd++;
	} while (addr < end);

	return 0;
}
void __iomem *__uc32_ioremap_pfn_caller(unsigned long pfn,
	unsigned long offset, size_t size, unsigned int mtype, void *caller)
{
	const struct mem_type *type;
	int err;
	unsigned long addr;
	struct vm_struct *area;

	/*
	 * High mappings must be section aligned
	 */
	if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SECTION_MASK))
		return NULL;

	/*
	 * Don't allow RAM to be mapped
	 */
	if (pfn_valid(pfn)) {
		WARN(1, "BUG: Your driver calls ioremap() on\n"
			"system memory. This leads to architecturally\n"
			"unpredictable behaviour, and ioremap() will fail in\n"
			"the next kernel release. Please fix your driver.\n");
		return NULL;
	}

	type = get_mem_type(mtype);
	if (!type)
		return NULL;

	/*
	 * Page align the mapping size, taking account of any offset.
	 */
	size = PAGE_ALIGN(offset + size);

	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		return NULL;
	addr = (unsigned long)area->addr;

	if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
		area->flags |= VM_UNICORE_SECTION_MAPPING;
		err = remap_area_sections(addr, pfn, size, type);
	} else
		err = ioremap_page_range(addr, addr + size, __pfn_to_phys(pfn),
					 __pgprot(type->prot_pte));

	if (err) {
		vunmap((void *)addr);
		return NULL;
	}

	flush_cache_vmap(addr, addr + size);
	return (void __iomem *) (offset + addr);
}
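
/*
 * Illustrative note on the path selection above: the section path is taken
 * only when the physical base, the page-aligned size and the vmalloc address
 * are all 4 MiB aligned (no bits set below PMD_MASK).  A hypothetical SZ_4M
 * remap of a 4 MiB-aligned physical address may therefore use
 * remap_area_sections(), while a 64 KiB remap always goes through
 * ioremap_page_range().
 */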
void __iomem *__uc32_ioremap_caller(unsigned long phys_addr, size_t size,
	unsigned int mtype, void *caller)
{
	unsigned long last_addr;
	unsigned long offset = phys_addr & ~PAGE_MASK;
	unsigned long pfn = __phys_to_pfn(phys_addr);

	/*
	 * Don't allow wraparound or zero size
	 */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	return __uc32_ioremap_pfn_caller(pfn, offset, size, mtype, caller);
}
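
/*
 * Worked example for the wraparound check above (hypothetical values):
 * phys_addr = 0xfffff000 with size = SZ_8K gives last_addr = 0x00000fff
 * once the 32-bit addition wraps, which is below phys_addr, so the
 * request is rejected with NULL.
 */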
/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *
__uc32_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
		   unsigned int mtype)
{
	return __uc32_ioremap_pfn_caller(pfn, offset, size, mtype,
			__builtin_return_address(0));
}
EXPORT_SYMBOL(__uc32_ioremap_pfn);
void __iomem *
__uc32_ioremap(unsigned long phys_addr, size_t size)
{
	return __uc32_ioremap_caller(phys_addr, size, MT_DEVICE,
			__builtin_return_address(0));
}
EXPORT_SYMBOL(__uc32_ioremap);
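
/*
 * Minimal usage sketch (not taken from a real driver; the physical base
 * 0xee000000 and the register offsets are hypothetical):
 *
 *	void __iomem *regs = __uc32_ioremap(0xee000000, SZ_4K);
 *
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(0x1, regs + 0x10);
 *	pr_info("status: %#x\n", readl(regs + 0x14));
 *	__uc32_iounmap(regs);
 *
 * As the header comment notes, remapped regions should only be accessed
 * through readl()/writel()/memcpy_toio() and friends.
 */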
void __uc32_iounmap(volatile void __iomem *io_addr)
{
	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
	struct vm_struct *vm;

	/*
	 * If this is a section based mapping we need to handle it
	 * specially as the VM subsystem does not know how to handle
	 * such a beast. We need the lock here b/c we need to clear
	 * all the mappings before the area can be reclaimed
	 * by someone else.
	 */
	vm = find_vm_area(addr);
	if (vm && (vm->flags & VM_IOREMAP) &&
			(vm->flags & VM_UNICORE_SECTION_MAPPING))
		unmap_area_sections((unsigned long)vm->addr, vm->size);

	vunmap(addr);
}
EXPORT_SYMBOL(__uc32_iounmap);