/*
 *  linux/arch/arm/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 *
 * Hacked for ARM by Phil Blundell <philb@gnu.org>
 * Hacked to allow all architectures to build, and various cleanups
 * by Russell King
 *
 * This allows a driver to remap an arbitrary region of bus memory into
 * virtual space.  One should *only* use readl, writel, memcpy_toio and
 * so on with such remapped areas.
 *
 * Because the ARM only has a 32-bit address space we can't address the
 * whole of the (physical) PCI space at once.  PCI huge-mode addressing
 * allows us to circumvent this restriction by splitting PCI space into
 * two 2GB chunks and mapping only one at a time into processor memory.
 * We use MMU protection domains to trap any attempt to access the bank
 * that is not currently mapped.  (This isn't fully implemented yet.)
 */
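/*
 * Illustrative only (not part of the original file): a driver would
 * typically obtain and use a mapping along these lines, where "res",
 * "regs" and "REG_STATUS" are hypothetical driver-local names:
 *
 *	void __iomem *regs = ioremap(res->start, resource_size(res));
 *	u32 val;
 *
 *	if (!regs)
 *		return -ENOMEM;
 *	val = readl(regs + REG_STATUS);
 *	iounmap(regs);
 *
 * Only the readl/writel/memcpy_toio family of accessors should be used
 * on the returned __iomem cookie, as noted above.
 */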
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/io.h>

#include <asm/cputype.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/sizes.h>

#include <asm/mach/map.h>
#include "mm.h"
/*
 * Used by ioremap() and iounmap() code to mark (super)section-mapped
 * I/O regions in vm_struct->flags field.
 */
#define VM_ARM_SECTION_MAPPING	0x80000000
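/*
 * remap_area_pte/pmd/pages below implement the page-sized fallback
 * mapping path: remap_area_pages() walks the kernel page tables for
 * [start, start + size), allocating pmd and pte levels as needed, and
 * remap_area_pte() installs one pte per page using the protection
 * bits from the requested mem_type.
 */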
static int remap_area_pte(pmd_t *pmd, unsigned long addr, unsigned long end,
			  unsigned long phys_addr, const struct mem_type *type)
{
	pgprot_t prot = __pgprot(type->prot_pte);
	pte_t *pte;

	pte = pte_alloc_kernel(pmd, addr);
	if (!pte)
		return -ENOMEM;

	do {
		if (!pte_none(*pte))
			goto bad;

		set_pte_ext(pte, pfn_pte(phys_addr >> PAGE_SHIFT, prot), 0);
		phys_addr += PAGE_SIZE;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;

 bad:
	printk(KERN_CRIT "remap_area_pte: page already exists\n");
	BUG();
}
static inline int remap_area_pmd(pgd_t *pgd, unsigned long addr,
				 unsigned long end, unsigned long phys_addr,
				 const struct mem_type *type)
{
	unsigned long next;
	pmd_t *pmd;
	int ret = 0;

	pmd = pmd_alloc(&init_mm, pgd, addr);
	if (!pmd)
		return -ENOMEM;

	do {
		next = pmd_addr_end(addr, end);
		ret = remap_area_pte(pmd, addr, next, phys_addr, type);
		if (ret)
			return ret;
		phys_addr += next - addr;
	} while (pmd++, addr = next, addr != end);
	return ret;
}
static int remap_area_pages(unsigned long start, unsigned long pfn,
			    size_t size, const struct mem_type *type)
{
	unsigned long addr = start;
	unsigned long next, end = start + size;
	unsigned long phys_addr = __pfn_to_phys(pfn);
	pgd_t *pgd;
	int err = 0;

	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = remap_area_pmd(pgd, addr, next, phys_addr, type);
		if (err)
			break;
		phys_addr += next - addr;
	} while (pgd++, addr = next, addr != end);

	return err;
}
int ioremap_page(unsigned long virt, unsigned long phys,
		 const struct mem_type *mtype)
{
	return remap_area_pages(virt, __phys_to_pfn(phys), PAGE_SIZE, mtype);
}
EXPORT_SYMBOL(ioremap_page);
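/*
 * init_mm.context.kvm_seq counts changes to the kernel mappings in the
 * vmalloc region.  When a task's mm is found to be behind init_mm, this
 * copies the vmalloc-area pgd entries from init_mm into that mm and
 * retries until the sequence number it copied is still current.
 */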
void __check_kvm_seq(struct mm_struct *mm)
{
	unsigned int seq;

	do {
		seq = init_mm.context.kvm_seq;
		memcpy(pgd_offset(mm, VMALLOC_START),
		       pgd_offset_k(VMALLOC_START),
		       sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
					pgd_index(VMALLOC_START)));
		mm->context.kvm_seq = seq;
	} while (seq != init_mm.context.kvm_seq);
}
#ifndef CONFIG_SMP
/*
 * Section support is unsafe on SMP - If you iounmap and ioremap a region,
 * the other CPUs will not see this change until their next context switch.
 * Meanwhile, (eg) if an interrupt comes in on one of those other CPUs
 * which requires the new ioremap'd region to be referenced, the CPU will
 * reference the _old_ region.
 *
 * Note that get_vm_area() allocates a guard 4K page, so we need to mask
 * the size back to 1MB aligned or we will overflow in the loop below.
 */
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
	unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1));
	pgd_t *pgd;

	flush_cache_vunmap(addr, end);
	pgd = pgd_offset_k(addr);
	do {
		pmd_t pmd, *pmdp = pmd_offset(pgd, addr);

		pmd = *pmdp;
		if (!pmd_none(pmd)) {
			/*
			 * Clear the PMD from the page table, and
			 * increment the kvm sequence so others
			 * notice this change.
			 *
			 * Note: this is still racy on SMP machines.
			 */
			pmd_clear(pmdp);
			init_mm.context.kvm_seq++;

			/*
			 * Free the page table, if there was one.
			 */
			if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
				pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
		}

		addr += PGDIR_SIZE;
		pgd++;
	} while (addr < end);

	/*
	 * Ensure that the active_mm is up to date - we want to
	 * catch any use-after-iounmap cases.
	 */
	if (current->active_mm->context.kvm_seq != init_mm.context.kvm_seq)
		__check_kvm_seq(current->active_mm);

	flush_tlb_kernel_range(virt, end);
}
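/*
 * With the classic ARM 2-level page tables each Linux pgd/pmd slot
 * covers 2MB backed by two 1MB hardware section entries, which is why
 * pmd[0] and pmd[1] are written as a pair below.
 */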
static int
remap_area_sections(unsigned long virt, unsigned long pfn,
		    size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pgd_t *pgd;

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	pgd = pgd_offset_k(addr);
	do {
		pmd_t *pmd = pmd_offset(pgd, addr);

		pmd[0] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
		pfn += SZ_1M >> PAGE_SHIFT;
		pmd[1] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
		pfn += SZ_1M >> PAGE_SHIFT;
		flush_pmd_entry(pmd);

		addr += PGDIR_SIZE;
		pgd++;
	} while (addr < end);

	return 0;
}
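/*
 * Supersections map 16MB at a time and must be replicated across 16
 * consecutive first-level entries (eight 2MB pmd slots here).  Bits
 * [23:20] of the descriptor carry physical address bits [35:32], which
 * is how pfns above 4GB are encoded below.
 */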
static int
remap_area_supersections(unsigned long virt, unsigned long pfn,
			 size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pgd_t *pgd;

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	pgd = pgd_offset_k(virt);
	do {
		unsigned long super_pmd_val, i;

		super_pmd_val = __pfn_to_phys(pfn) | type->prot_sect |
				PMD_SECT_SUPER;
		super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;

		for (i = 0; i < 8; i++) {
			pmd_t *pmd = pmd_offset(pgd, addr);

			pmd[0] = __pmd(super_pmd_val);
			pmd[1] = __pmd(super_pmd_val);
			flush_pmd_entry(pmd);

			addr += PGDIR_SIZE;
			pgd++;
		}

		pfn += SUPERSECTION_SIZE >> PAGE_SHIFT;
	} while (addr < end);

	return 0;
}
#endif	/* !CONFIG_SMP */
/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 *
 * 'flags' are the extra L_PTE_ flags that you want to specify for this
 * mapping.  See <asm/pgtable.h> for more information.
 */
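/*
 * Where the alignment of physical address, size and virtual address
 * allows (and section mappings are usable, see the SMP note above),
 * supersection or section mappings are preferred; otherwise we fall
 * back to ordinary page mappings via remap_area_pages().
 */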
void __iomem *
__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
		  unsigned int mtype)
{
	const struct mem_type *type;
	int err;
	unsigned long addr;
	struct vm_struct *area;

	/*
	 * High mappings must be supersection aligned
	 */
	if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
		return NULL;

	type = get_mem_type(mtype);
	if (!type)
		return NULL;

	/*
	 * Page align the mapping size, taking account of any offset.
	 */
	size = PAGE_ALIGN(offset + size);

	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	addr = (unsigned long)area->addr;

#ifndef CONFIG_SMP
	if (DOMAIN_IO == 0 &&
	    (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
	       cpu_is_xsc3()) && pfn >= 0x100000 &&
	    !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_supersections(addr, pfn, size, type);
	} else if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_sections(addr, pfn, size, type);
	} else
#endif
		err = remap_area_pages(addr, pfn, size, type);

	if (err) {
		vunmap((void *)addr);
		return NULL;
	}

	flush_cache_vmap(addr, addr + size);
	return (void __iomem *) (offset + addr);
}
EXPORT_SYMBOL(__arm_ioremap_pfn);
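/*
 * __arm_ioremap() is the physical-address front end: it splits phys_addr
 * into a page frame number plus an offset within the page, rejects zero
 * size and address wraparound, and defers to __arm_ioremap_pfn().
 */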
void __iomem *
__arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)
{
	unsigned long last_addr;
	unsigned long offset = phys_addr & ~PAGE_MASK;
	unsigned long pfn = __phys_to_pfn(phys_addr);

	/*
	 * Don't allow wraparound or zero size
	 */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	return __arm_ioremap_pfn(pfn, offset, size, mtype);
}
EXPORT_SYMBOL(__arm_ioremap);
void __iounmap(volatile void __iomem *io_addr)
{
	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
#ifndef CONFIG_SMP
	struct vm_struct **p, *tmp;

	/*
	 * If this is a section based mapping we need to handle it
	 * specially as the VM subsystem does not know how to handle
	 * such a beast. We need the lock here b/c we need to clear
	 * all the mappings before the area can be reclaimed
	 * by someone else.
	 */
	write_lock(&vmlist_lock);
	for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
		if ((tmp->flags & VM_IOREMAP) && (tmp->addr == addr)) {
			if (tmp->flags & VM_ARM_SECTION_MAPPING) {
				unmap_area_sections((unsigned long)tmp->addr,
						    tmp->size);
			}
			break;
		}
	}
	write_unlock(&vmlist_lock);
#endif

	vunmap(addr);
}
EXPORT_SYMBOL(__iounmap);