/*
 *  linux/arch/arm/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 *
 * Hacked for ARM by Phil Blundell <philb@gnu.org>
 * Hacked to allow all architectures to build, and various cleanups
 * by Russell King
 *
 * This allows a driver to remap an arbitrary region of bus memory into
 * virtual space.  One should *only* use readl, writel, memcpy_toio and
 * so on with such remapped areas.
 *
 * Because the ARM only has a 32-bit address space we can't address the
 * whole of the (physical) PCI space at once.  PCI huge-mode addressing
 * allows us to circumvent this restriction by splitting PCI space into
 * two 2GB chunks and mapping only one at a time into processor memory.
 * We use MMU protection domains to trap any attempt to access the bank
 * that is not currently mapped.  (This isn't fully implemented yet.)
 */
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/io.h>

#include <asm/cputype.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/sizes.h>

#include <asm/mach/map.h>
#include "mm.h"

int ioremap_page(unsigned long virt, unsigned long phys,
		 const struct mem_type *mtype)
{
	return ioremap_page_range(virt, virt + PAGE_SIZE, phys,
				  __pgprot(mtype->prot_pte));
}
EXPORT_SYMBOL(ioremap_page);
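
/*
 * Illustrative sketch (kept out of the build): how platform code might use
 * ioremap_page() to map a single page of device registers at a virtual
 * address it has reserved itself.  EXAMPLE_VIRT and EXAMPLE_PHYS are
 * hypothetical placeholders, not symbols defined anywhere in the tree.
 */
#if 0
static int example_map_one_page(void)
{
	const struct mem_type *mtype = get_mem_type(MT_DEVICE);

	if (!mtype)
		return -EINVAL;

	/* Both addresses must be page aligned; the VA must already be reserved. */
	return ioremap_page(EXAMPLE_VIRT, EXAMPLE_PHYS, mtype);
}
#endif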

/*
 * The kernel's mappings for the vmalloc/ioremap region have changed (the
 * kvm_seq counter was bumped), so copy the up-to-date pgd entries covering
 * that region from init_mm into this mm and record the sequence number we
 * synced against.
 */
void __check_kvm_seq(struct mm_struct *mm)
{
	unsigned int seq;

	do {
		seq = init_mm.context.kvm_seq;
		memcpy(pgd_offset(mm, VMALLOC_START),
		       pgd_offset_k(VMALLOC_START),
		       sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
					pgd_index(VMALLOC_START)));
		mm->context.kvm_seq = seq;
	} while (seq != init_mm.context.kvm_seq);
}

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
/*
 * Section support is unsafe on SMP - If you iounmap and ioremap a region,
 * the other CPUs will not see this change until their next context switch.
 * Meanwhile, (eg) if an interrupt comes in on one of those other CPUs
 * which requires the new ioremap'd region to be referenced, the CPU will
 * reference the _old_ region.
 *
 * Note that get_vm_area_caller() allocates a guard 4K page, so we need to
 * mask the size back to 1MB aligned or we will overflow in the loop below.
 */
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
	unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1));
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmdp;

	flush_cache_vunmap(addr, end);
	pgd = pgd_offset_k(addr);
	pud = pud_offset(pgd, addr);
	pmdp = pmd_offset(pud, addr);
	do {
		pmd_t pmd = *pmdp;

		if (!pmd_none(pmd)) {
			/*
			 * Clear the PMD from the page table, and
			 * increment the kvm sequence so others
			 * notice this change.
			 *
			 * Note: this is still racy on SMP machines.
			 */
			pmd_clear(pmdp);
			init_mm.context.kvm_seq++;

			/*
			 * Free the page table, if there was one.
			 */
			if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
				pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
		}

		addr += PMD_SIZE;
		pmdp += 2;
	} while (addr < end);

	/*
	 * Ensure that the active_mm is up to date - we want to
	 * catch any use-after-iounmap cases.
	 */
	if (current->active_mm->context.kvm_seq != init_mm.context.kvm_seq)
		__check_kvm_seq(current->active_mm);

	flush_tlb_kernel_range(virt, end);
}

static int
remap_area_sections(unsigned long virt, unsigned long pfn,
		    size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	pgd = pgd_offset_k(addr);
	pud = pud_offset(pgd, addr);
	pmd = pmd_offset(pud, addr);
	do {
		pmd[0] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
		pfn += SZ_1M >> PAGE_SHIFT;
		pmd[1] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
		pfn += SZ_1M >> PAGE_SHIFT;
		flush_pmd_entry(pmd);

		addr += PMD_SIZE;
		pmd += 2;
	} while (addr < end);

	return 0;
}
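
/*
 * Worked example for the section loop above (illustrative numbers, assuming
 * classic non-LPAE ARM with 4K pages): PMD_SIZE is 2MB, so a 4MB mapping
 * takes two passes round the do/while; each pass writes a pair of 1MB
 * section entries (pmd[0] and pmd[1]) and advances pfn by
 * SZ_1M >> PAGE_SHIFT = 256 page frames per section.
 */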

static int
remap_area_supersections(unsigned long virt, unsigned long pfn,
			 size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	pgd = pgd_offset_k(virt);
	pud = pud_offset(pgd, addr);
	pmd = pmd_offset(pud, addr);
	do {
		unsigned long super_pmd_val, i;

		super_pmd_val = __pfn_to_phys(pfn) | type->prot_sect |
				PMD_SECT_SUPER;
		super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;

		for (i = 0; i < 8; i++) {
			pmd[0] = __pmd(super_pmd_val);
			pmd[1] = __pmd(super_pmd_val);
			flush_pmd_entry(pmd);

			addr += PMD_SIZE;
			pmd += 2;
		}

		pfn += SUPERSECTION_SIZE >> PAGE_SHIFT;
	} while (addr < end);

	return 0;
}
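
/*
 * Worked example for the supersection encoding above (illustrative values,
 * assuming 4K pages): a 36-bit physical address of 0x2_4000_0000 has
 * pfn = 0x240000, and pfn >> (32 - PAGE_SHIFT) = pfn >> 20 = 0x2, so bits
 * [35:32] of the physical address land in descriptor bits [23:20] as the
 * supersection extended base.  The inner loop then replicates the entry
 * 16 times (8 passes x 2 pmd slots), as required for a 16MB supersection.
 */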
#endif

void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
	unsigned long offset, size_t size, unsigned int mtype, void *caller)
{
	const struct mem_type *type;
	int err;
	unsigned long addr;
	struct vm_struct *area;

#ifndef CONFIG_ARM_LPAE
	/*
	 * High mappings must be supersection aligned
	 */
	if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
		return NULL;
#endif

	type = get_mem_type(mtype);
	if (!type)
		return NULL;

	/*
	 * Page align the mapping size, taking account of any offset.
	 */
	size = PAGE_ALIGN(offset + size);

	/*
	 * Try to reuse one of the static mappings whenever possible.
	 */
	read_lock(&vmlist_lock);
	for (area = vmlist; area; area = area->next) {
		if (!size || (sizeof(phys_addr_t) == 4 && pfn >= 0x100000))
			break;
		if (!(area->flags & VM_ARM_STATIC_MAPPING))
			continue;
		if ((area->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
			continue;
		if (__phys_to_pfn(area->phys_addr) > pfn ||
		    __pfn_to_phys(pfn) + size - 1 > area->phys_addr + area->size - 1)
			continue;
		/* we can drop the lock here as we know *area is static */
		read_unlock(&vmlist_lock);
		addr = (unsigned long)area->addr;
		addr += __pfn_to_phys(pfn) - area->phys_addr;
		return (void __iomem *) (offset + addr);
	}
	read_unlock(&vmlist_lock);

	/*
	 * Don't allow RAM to be mapped - this causes problems with ARMv6+
	 */
	if (WARN_ON(pfn_valid(pfn)))
		return NULL;

	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		return NULL;
	addr = (unsigned long)area->addr;

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
	if (DOMAIN_IO == 0 &&
	    (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
	       cpu_is_xsc3()) && pfn >= 0x100000 &&
	       !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_supersections(addr, pfn, size, type);
	} else if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_sections(addr, pfn, size, type);
	} else
#endif
		err = ioremap_page_range(addr, addr + size, __pfn_to_phys(pfn),
					 __pgprot(type->prot_pte));

	if (err) {
		vunmap((void *)addr);
		return NULL;
	}

	flush_cache_vmap(addr, addr + size);
	return (void __iomem *) (offset + addr);
}

void __iomem *__arm_ioremap_caller(unsigned long phys_addr, size_t size,
	unsigned int mtype, void *caller)
{
	unsigned long last_addr;
	unsigned long offset = phys_addr & ~PAGE_MASK;
	unsigned long pfn = __phys_to_pfn(phys_addr);

	/*
	 * Don't allow wraparound or zero size
	 */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
			caller);
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *
__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
		  unsigned int mtype)
{
	return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
			__builtin_return_address(0));
}
EXPORT_SYMBOL(__arm_ioremap_pfn);

void __iomem *
__arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)
{
	return __arm_ioremap_caller(phys_addr, size, mtype,
			__builtin_return_address(0));
}
EXPORT_SYMBOL(__arm_ioremap);
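
/*
 * Illustrative sketch (kept out of the build): typical driver-style use of
 * the mapping created by __arm_ioremap().  As the comment at the top of the
 * file says, only readl()/writel()/memcpy_toio() and friends may be used on
 * the returned cookie.  EXAMPLE_PHYS_BASE and EXAMPLE_CTRL_REG are
 * hypothetical placeholders; a driver would normally go through the generic
 * ioremap()/iounmap() wrappers rather than calling these helpers directly.
 */
#if 0
static int example_driver_probe(void)
{
	void __iomem *regs;
	u32 status;

	regs = __arm_ioremap(EXAMPLE_PHYS_BASE, SZ_4K, MT_DEVICE);
	if (!regs)
		return -ENOMEM;

	writel(0x1, regs + EXAMPLE_CTRL_REG);	/* hypothetical register */
	status = readl(regs + EXAMPLE_CTRL_REG);

	__iounmap(regs);
	return status ? 0 : -ENODEV;
}
#endif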

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space as memory. Needed when the kernel wants to execute
 * code in external memory. This is needed for reprogramming source
 * clocks that would affect normal memory for example. Please see
 * CONFIG_GENERIC_ALLOCATOR for allocating external memory.
 */
void __iomem *
__arm_ioremap_exec(unsigned long phys_addr, size_t size, bool cached)
{
	unsigned int mtype;

	if (cached)
		mtype = MT_MEMORY;
	else
		mtype = MT_MEMORY_NONCACHED;

	return __arm_ioremap_caller(phys_addr, size, mtype,
			__builtin_return_address(0));
}
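
/*
 * Illustrative sketch (kept out of the build): one way SRAM-resident code
 * could be set up through __arm_ioremap_exec(), assuming the fncpy() helper
 * from <asm/fncpy.h> is available to copy a relocatable function into the
 * mapping.  EXAMPLE_SRAM_PHYS, EXAMPLE_SRAM_FN_SIZE and example_sram_fn are
 * hypothetical placeholders.
 */
#if 0
extern void example_sram_fn(void);	/* hypothetical relocatable function */

static int example_run_from_sram(void)
{
	void (*sram_call)(void);
	void __iomem *sram;

	sram = __arm_ioremap_exec(EXAMPLE_SRAM_PHYS, SZ_4K, true);
	if (!sram)
		return -ENOMEM;

	/* Copy the code and obtain a callable pointer into the new mapping. */
	sram_call = fncpy(sram, &example_sram_fn, EXAMPLE_SRAM_FN_SIZE);
	sram_call();

	return 0;
}
#endif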

void __iounmap(volatile void __iomem *io_addr)
{
	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
	struct vm_struct *vm;

	read_lock(&vmlist_lock);
	for (vm = vmlist; vm; vm = vm->next) {
		if (vm->addr > addr)
			break;
		if (!(vm->flags & VM_IOREMAP))
			continue;
		/* If this is a static mapping we must leave it alone */
		if ((vm->flags & VM_ARM_STATIC_MAPPING) &&
		    (vm->addr <= addr) && (vm->addr + vm->size > addr)) {
			read_unlock(&vmlist_lock);
			return;
		}

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
		/*
		 * If this is a section based mapping we need to handle it
		 * specially as the VM subsystem does not know how to handle
		 * such a beast.
		 */
		if ((vm->addr == addr) &&
		    (vm->flags & VM_ARM_SECTION_MAPPING)) {
			unmap_area_sections((unsigned long)vm->addr, vm->size);
			break;
		}
#endif
	}
	read_unlock(&vmlist_lock);

	vunmap(addr);
}
EXPORT_SYMBOL(__iounmap);