/*
 *  linux/arch/arm/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 *
 * Hacked for ARM by Phil Blundell <philb@gnu.org>
 * Hacked to allow all architectures to build, and various cleanups
 * by Russell King
 *
 * This allows a driver to remap an arbitrary region of bus memory into
 * virtual space.  One should *only* use readl, writel, memcpy_toio and
 * so on with such remapped areas.
 *
 * Because the ARM only has a 32-bit address space we can't address the
 * whole of the (physical) PCI space at once.  PCI huge-mode addressing
 * allows us to circumvent this restriction by splitting PCI space into
 * two 2GB chunks and mapping only one at a time into processor memory.
 * We use MMU protection domains to trap any attempt to access the bank
 * that is not currently mapped.  (This isn't fully implemented yet.)
 */
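/*
 * A minimal usage sketch (illustrative only: the device base address,
 * size and register offsets below are hypothetical, not taken from
 * this file).  A driver maps the region once, then touches it only
 * through the MMIO accessors, and unmaps it when done:
 *
 *      u32 status;
 *      void __iomem *regs = ioremap(0xd8000000, SZ_4K);
 *
 *      if (!regs)
 *              return -ENOMEM;
 *      writel(1, regs + 0x04);
 *      status = readl(regs + 0x08);
 *      iounmap(regs);
 */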
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/io.h>

#include <asm/cputype.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/sizes.h>

#include <asm/mach/map.h>
#include "mm.h"
/*
 * Used by ioremap() and iounmap() code to mark (super)section-mapped
 * I/O regions in vm_struct->flags field.
 */
#define VM_ARM_SECTION_MAPPING	0x80000000
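/*
 * The flag deliberately occupies the top bit of the flags word, well
 * away from the generic VM_* flags (VM_IOREMAP and friends live in the
 * low bits), so this arch-private marker cannot collide with core
 * vmalloc usage.
 */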
int ioremap_page(unsigned long virt, unsigned long phys,
                 const struct mem_type *mtype)
{
        return ioremap_page_range(virt, virt + PAGE_SIZE, phys,
                                  __pgprot(mtype->prot_pte));
}
EXPORT_SYMBOL(ioremap_page);
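/*
 * __check_kvm_seq() brings a task's view of the kernel ("kvm" here
 * means kernel virtual memory, not the hypervisor) page tables up to
 * date.  The top-level entries for the vmalloc area are replicated in
 * every process's pgd; when init_mm's entries change, kvm_seq is
 * bumped, and stale mms lazily recopy the VMALLOC_START..VMALLOC_END
 * slots here.  The do/while retries if init_mm.context.kvm_seq moves
 * again mid-copy, so we never record a sequence number whose entries
 * we have not fully copied.
 */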
void __check_kvm_seq(struct mm_struct *mm)
{
        unsigned int seq;

        do {
                seq = init_mm.context.kvm_seq;
                memcpy(pgd_offset(mm, VMALLOC_START),
                       pgd_offset_k(VMALLOC_START),
                       sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
                                        pgd_index(VMALLOC_START)));
                mm->context.kvm_seq = seq;
        } while (seq != init_mm.context.kvm_seq);
}
#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
/*
 * Section support is unsafe on SMP - If you iounmap and ioremap a region,
 * the other CPUs will not see this change until their next context switch.
 * Meanwhile, (eg) if an interrupt comes in on one of those other CPUs
 * which requires the new ioremap'd region to be referenced, the CPU will
 * reference the _old_ region.
 *
 * Note that get_vm_area_caller() allocates a guard 4K page, so we need to
 * mask the size back to 1MB aligned or we will overflow in the loop below.
 */
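/*
 * Worked example of that masking (illustrative numbers): a 1MB section
 * mapping arrives here via __iounmap() with size == SZ_1M + PAGE_SIZE
 * because vm_struct->size includes the guard page, and
 * "size & ~(SZ_1M - 1)" trims it back to SZ_1M so the unmap loop stops
 * at the true end of the mapping rather than one PMD pair beyond it.
 */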
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
        unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1));
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmdp;

        flush_cache_vunmap(addr, end);
        pgd = pgd_offset_k(addr);
        pud = pud_offset(pgd, addr);
        pmdp = pmd_offset(pud, addr);
        do {
                pmd_t pmd = *pmdp;

                if (!pmd_none(pmd)) {
                        /*
                         * Clear the PMD from the page table, and
                         * increment the kvm sequence so others
                         * notice this change.
                         *
                         * Note: this is still racy on SMP machines.
                         */
                        pmd_clear(pmdp);
                        init_mm.context.kvm_seq++;

                        /*
                         * Free the page table, if there was one.
                         */
                        if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
                                pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
                }

                addr += PMD_SIZE;
                pmdp += 2;
        } while (addr < end);

        /*
         * Ensure that the active_mm is up to date - we want to
         * catch any use-after-iounmap cases.
         */
        if (current->active_mm->context.kvm_seq != init_mm.context.kvm_seq)
                __check_kvm_seq(current->active_mm);

        flush_tlb_kernel_range(virt, end);
}
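/*
 * remap_area_sections() installs 1MB section mappings.  On the classic
 * ARM two-level page tables a Linux PMD covers 2MB and is implemented
 * as a pair of 1MB hardware section descriptors, which is why the loop
 * below (like the unmap loop above) writes pmd[0] and pmd[1] and then
 * advances by PMD_SIZE, i.e. two pmd_t entries, per iteration.
 */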
static int
remap_area_sections(unsigned long virt, unsigned long pfn,
                    size_t size, const struct mem_type *type)
{
        unsigned long addr = virt, end = virt + size;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;

        /*
         * Remove and free any PTE-based mapping, and
         * sync the current kernel mapping.
         */
        unmap_area_sections(virt, size);

        pgd = pgd_offset_k(addr);
        pud = pud_offset(pgd, addr);
        pmd = pmd_offset(pud, addr);
        do {
                pmd[0] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
                pfn += SZ_1M >> PAGE_SHIFT;
                pmd[1] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
                pfn += SZ_1M >> PAGE_SHIFT;
                flush_pmd_entry(pmd);

                addr += PMD_SIZE;
                pmd += 2;
        } while (addr < end);

        return 0;
}
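/*
 * remap_area_supersections() installs 16MB ARMv6/XSC3 supersection
 * mappings.  A supersection descriptor must be replicated across all
 * sixteen consecutive 1MB first-level slots it covers, hence the
 * 8-iteration inner loop writing two pmd_t entries each time.  Bits
 * [23:20] of the descriptor carry bits [35:32] of the physical
 * address, which is what "(pfn >> (32 - PAGE_SHIFT)) & 0xf" extracts;
 * this is how a 32-bit kernel reaches physical addresses above 4GB
 * without LPAE.
 */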
static int
remap_area_supersections(unsigned long virt, unsigned long pfn,
                         size_t size, const struct mem_type *type)
{
        unsigned long addr = virt, end = virt + size;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;

        /*
         * Remove and free any PTE-based mapping, and
         * sync the current kernel mapping.
         */
        unmap_area_sections(virt, size);

        pgd = pgd_offset_k(virt);
        pud = pud_offset(pgd, addr);
        pmd = pmd_offset(pud, addr);
        do {
                unsigned long super_pmd_val, i;

                super_pmd_val = __pfn_to_phys(pfn) | type->prot_sect |
                                PMD_SECT_SUPER;
                super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;

                for (i = 0; i < 8; i++) {
                        pmd[0] = __pmd(super_pmd_val);
                        pmd[1] = __pmd(super_pmd_val);
                        flush_pmd_entry(pmd);

                        addr += PMD_SIZE;
                        pmd += 2;
                }

                pfn += SUPERSECTION_SIZE >> PAGE_SHIFT;
        } while (addr < end);

        return 0;
}
#endif
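/*
 * __arm_ioremap_pfn_caller() is the workhorse behind all the ioremap
 * variants below: it validates the target (no system RAM, and on
 * non-LPAE any pfn at or above the 4GB boundary must be supersection
 * aligned), grabs a vmalloc-area slot, then picks the largest mapping
 * granularity the address, size and alignment allow - supersections,
 * then sections, then ordinary pages.
 */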
void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
        unsigned long offset, size_t size, unsigned int mtype, void *caller)
{
        const struct mem_type *type;
        int err;
        unsigned long addr;
        struct vm_struct *area;

#ifndef CONFIG_ARM_LPAE
        /*
         * High mappings must be supersection aligned
         */
        if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
                return NULL;
#endif

        /*
         * Don't allow RAM to be mapped - this causes problems with ARMv6+
         */
        if (WARN_ON(pfn_valid(pfn)))
                return NULL;

        type = get_mem_type(mtype);
        if (!type)
                return NULL;

        /*
         * Page align the mapping size, taking account of any offset.
         */
        size = PAGE_ALIGN(offset + size);

        area = get_vm_area_caller(size, VM_IOREMAP, caller);
        if (!area)
                return NULL;
        addr = (unsigned long)area->addr;

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
        if (DOMAIN_IO == 0 &&
            (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
               cpu_is_xsc3()) && pfn >= 0x100000 &&
               !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) {
                area->flags |= VM_ARM_SECTION_MAPPING;
                err = remap_area_supersections(addr, pfn, size, type);
        } else if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
                area->flags |= VM_ARM_SECTION_MAPPING;
                err = remap_area_sections(addr, pfn, size, type);
        } else
#endif
                err = ioremap_page_range(addr, addr + size, __pfn_to_phys(pfn),
                                         __pgprot(type->prot_pte));

        if (err) {
                vunmap((void *)addr);
                return NULL;
        }

        flush_cache_vmap(addr, addr + size);
        return (void __iomem *) (offset + addr);
}
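/*
 * The wraparound check in __arm_ioremap_caller() below rejects, for
 * example (illustrative values), phys_addr == 0xfffff000 with
 * size == 0x2000: last_addr would be 0x100000fff, which truncates to
 * 0xfff in a 32-bit unsigned long and so compares below phys_addr.
 */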
void __iomem *__arm_ioremap_caller(unsigned long phys_addr, size_t size,
        unsigned int mtype, void *caller)
{
        unsigned long last_addr;
        unsigned long offset = phys_addr & ~PAGE_MASK;
        unsigned long pfn = __phys_to_pfn(phys_addr);

        /*
         * Don't allow wraparound or zero size
         */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr)
                return NULL;

        return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
                        caller);
}
/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *
__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
                  unsigned int mtype)
{
        return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
                        __builtin_return_address(0));
}
EXPORT_SYMBOL(__arm_ioremap_pfn);
void __iomem *
__arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)
{
        return __arm_ioremap_caller(phys_addr, size, mtype,
                        __builtin_return_address(0));
}
EXPORT_SYMBOL(__arm_ioremap);
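/*
 * __iounmap() undoes any of the mappings set up above.  Section and
 * supersection mappings are invisible to the generic vmalloc code,
 * which only knows how to tear down PTE-based mappings, so before
 * handing the area back via vunmap() we must find it on vmlist and
 * clear the PMD entries ourselves.
 */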
void __iounmap(volatile void __iomem *io_addr)
{
        void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
        struct vm_struct **p, *tmp;

        /*
         * If this is a section based mapping we need to handle it
         * specially as the VM subsystem does not know how to handle
         * such a beast. We need the lock here because we must clear
         * all the mappings before the area can be reclaimed
         * by someone else.
         */
        write_lock(&vmlist_lock);
        for (p = &vmlist; (tmp = *p); p = &tmp->next) {
                if ((tmp->flags & VM_IOREMAP) && (tmp->addr == addr)) {
                        if (tmp->flags & VM_ARM_SECTION_MAPPING) {
                                unmap_area_sections((unsigned long)tmp->addr,
                                                    tmp->size);
                        }
                        break;
                }
        }
        write_unlock(&vmlist_lock);
#endif

        vunmap(addr);
}
EXPORT_SYMBOL(__iounmap);