/*
 *  linux/arch/arm/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 *
 * Hacked for ARM by Phil Blundell <philb@gnu.org>
 * Hacked to allow all architectures to build, and various cleanups
 * by Russell King
 *
 * This allows a driver to remap an arbitrary region of bus memory into
 * virtual space.  One should *only* use readl, writel, memcpy_toio and
 * so on with such remapped areas.
 *
 * Because the ARM only has a 32-bit address space we can't address the
 * whole of the (physical) PCI space at once.  PCI huge-mode addressing
 * allows us to circumvent this restriction by splitting PCI space into
 * two 2GB chunks and mapping only one at a time into processor memory.
 * We use MMU protection domains to trap any attempt to access the bank
 * that is not currently mapped.  (This isn't fully implemented yet.)
 */
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/io.h>
#include <linux/sizes.h>

#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/system_info.h>

#include <asm/mach/map.h>
#include <asm/mach/pci.h>
#include "mm.h"
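/*
 * As the comment at the top of this file notes, areas remapped by this API
 * must only be accessed with readl()/writel()/memcpy_toio() and friends.
 * The function below is purely an illustrative sketch of that usage
 * pattern, not part of the ARM ioremap implementation; the bus address,
 * size and register offsets are made-up values.
 */
static int __maybe_unused example_driver_usage(void)
{
	void __iomem *base;

	/* hypothetical device: 4K of registers at bus address 0xd0000000 */
	base = ioremap(0xd0000000, SZ_4K);
	if (!base)
		return -ENOMEM;

	writel(0x1, base + 0x4);			/* hypothetical enable register */
	pr_info("device id: %08x\n", readl(base));	/* hypothetical ID register */

	iounmap(base);
	return 0;
}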
int ioremap_page(unsigned long virt, unsigned long phys,
		 const struct mem_type *mtype)
{
	return ioremap_page_range(virt, virt + PAGE_SIZE, phys,
				  __pgprot(mtype->prot_pte));
}
EXPORT_SYMBOL(ioremap_page);
void __check_vmalloc_seq(struct mm_struct *mm)
{
	unsigned int seq;

	/*
	 * Copy the vmalloc-area PGD entries from init_mm, and retry if
	 * init_mm's sequence number moved on while we were copying.
	 */
	do {
		seq = init_mm.context.vmalloc_seq;
		memcpy(pgd_offset(mm, VMALLOC_START),
		       pgd_offset_k(VMALLOC_START),
		       sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
					pgd_index(VMALLOC_START)));
		mm->context.vmalloc_seq = seq;
	} while (seq != init_mm.context.vmalloc_seq);
}
#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
/*
 * Section support is unsafe on SMP - If you iounmap and ioremap a region,
 * the other CPUs will not see this change until their next context switch.
 * Meanwhile, (eg) if an interrupt comes in on one of those other CPUs
 * which requires the new ioremap'd region to be referenced, the CPU will
 * reference the _old_ region.
 *
 * Note that get_vm_area_caller() allocates a guard 4K page, so we need to
 * mask the size back to 1MB aligned or we will overflow in the loop below.
 */
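/*
 * Worked example (illustrative, arbitrary size): iounmap() of a 3MB section
 * mapping passes in vm->size, which get_vm_area_caller() rounded up to
 * 3MB + 4K for the guard page; masking with ~(SZ_1M - 1) brings that back
 * to 3MB, so the do/while loop below stops exactly at the end of the
 * mapped sections instead of walking past them.
 */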
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
	unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1));
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmdp;

	flush_cache_vunmap(addr, end);
	pgd = pgd_offset_k(addr);
	pud = pud_offset(pgd, addr);
	pmdp = pmd_offset(pud, addr);
	do {
		pmd_t pmd = *pmdp;

		if (!pmd_none(pmd)) {
			/*
			 * Clear the PMD from the page table, and
			 * increment the vmalloc sequence so others
			 * notice this change.
			 *
			 * Note: this is still racy on SMP machines.
			 */
			pmd_clear(pmdp);
			init_mm.context.vmalloc_seq++;

			/*
			 * Free the page table, if there was one.
			 */
			if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
				pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
		}

		addr += PMD_SIZE;
		pmdp += 2;
	} while (addr < end);

	/*
	 * Ensure that the active_mm is up to date - we want to
	 * catch any use-after-iounmap cases.
	 */
	if (current->active_mm->context.vmalloc_seq != init_mm.context.vmalloc_seq)
		__check_vmalloc_seq(current->active_mm);

	flush_tlb_kernel_range(virt, end);
}
static int
remap_area_sections(unsigned long virt, unsigned long pfn,
		    size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	pgd = pgd_offset_k(addr);
	pud = pud_offset(pgd, addr);
	pmd = pmd_offset(pud, addr);
	do {
		pmd[0] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
		pfn += SZ_1M >> PAGE_SHIFT;
		pmd[1] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
		pfn += SZ_1M >> PAGE_SHIFT;
		flush_pmd_entry(pmd);

		addr += PMD_SIZE;
		pmd += 2;
	} while (addr < end);

	return 0;
}
static int
remap_area_supersections(unsigned long virt, unsigned long pfn,
			 size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	pgd = pgd_offset_k(virt);
	pud = pud_offset(pgd, addr);
	pmd = pmd_offset(pud, addr);
	do {
		unsigned long super_pmd_val, i;

		super_pmd_val = __pfn_to_phys(pfn) | type->prot_sect |
				PMD_SECT_SUPER;
		/*
		 * Physical address bits [35:32] are carried in bits [23:20]
		 * of a supersection descriptor.
		 */
		super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;

		for (i = 0; i < 8; i++) {
			pmd[0] = __pmd(super_pmd_val);
			pmd[1] = __pmd(super_pmd_val);
			flush_pmd_entry(pmd);

			addr += PMD_SIZE;
			pmd += 2;
		}

		pfn += SUPERSECTION_SIZE >> PAGE_SHIFT;
	} while (addr < end);

	return 0;
}
#endif
void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
	unsigned long offset, size_t size, unsigned int mtype, void *caller)
{
	const struct mem_type *type;
	int err;
	unsigned long addr;
	struct vm_struct *area;

#ifndef CONFIG_ARM_LPAE
	/*
	 * High mappings must be supersection aligned
	 */
	if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
		return NULL;
#endif

	type = get_mem_type(mtype);
	if (!type)
		return NULL;

	/*
	 * Page align the mapping size, taking account of any offset.
	 */
	size = PAGE_ALIGN(offset + size);

	/*
	 * Try to reuse one of the static mapping whenever possible.
	 */
	read_lock(&vmlist_lock);
	for (area = vmlist; area; area = area->next) {
		if (!size || (sizeof(phys_addr_t) == 4 && pfn >= 0x100000))
			break;
		if (!(area->flags & VM_ARM_STATIC_MAPPING))
			continue;
		if ((area->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
			continue;
		if (__phys_to_pfn(area->phys_addr) > pfn ||
		    __pfn_to_phys(pfn) + size-1 > area->phys_addr + area->size-1)
			continue;
		/* we can drop the lock here as we know *area is static */
		read_unlock(&vmlist_lock);
		addr = (unsigned long)area->addr;
		addr += __pfn_to_phys(pfn) - area->phys_addr;
		return (void __iomem *) (offset + addr);
	}
	read_unlock(&vmlist_lock);

	/*
	 * Don't allow RAM to be mapped - this causes problems with ARMv6+
	 */
	if (WARN_ON(pfn_valid(pfn)))
		return NULL;

	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		return NULL;
	addr = (unsigned long)area->addr;
	area->phys_addr = __pfn_to_phys(pfn);

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
	if (DOMAIN_IO == 0 &&
	    (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
	       cpu_is_xsc3()) && pfn >= 0x100000 &&
	       !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_supersections(addr, pfn, size, type);
	} else if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_sections(addr, pfn, size, type);
	} else
#endif
		err = ioremap_page_range(addr, addr + size, __pfn_to_phys(pfn),
					 __pgprot(type->prot_pte));

	if (err) {
		vunmap((void *)addr);
		return NULL;
	}

	flush_cache_vmap(addr, addr + size);
	return (void __iomem *) (offset + addr);
}
void __iomem *__arm_ioremap_caller(unsigned long phys_addr, size_t size,
	unsigned int mtype, void *caller)
{
	unsigned long last_addr;
	unsigned long offset = phys_addr & ~PAGE_MASK;
	unsigned long pfn = __phys_to_pfn(phys_addr);

	/*
	 * Don't allow wraparound or zero size
	 */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
			caller);
}
/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *
__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
		  unsigned int mtype)
{
	return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
			__builtin_return_address(0));
}
EXPORT_SYMBOL(__arm_ioremap_pfn);
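/*
 * Illustrative sketch only, not part of this file's API: because
 * __arm_ioremap_pfn() takes a pfn plus a byte offset, a caller can map a
 * device whose registers are not page aligned without doing the alignment
 * arithmetic itself, as the NOTE above describes.  The pfn/offset below
 * (physical 0x10001c00) and the 0x100-byte size are made-up values.
 */
static void __iomem * __maybe_unused example_map_unaligned(void)
{
	/* pfn 0x10001 == physical 0x10001000, plus 0xc00 into that page */
	return __arm_ioremap_pfn(0x10001, 0xc00, 0x100, MT_DEVICE);
}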
/*
 * Indirection hook so a platform can override the default ioremap
 * implementation.
 */
void __iomem * (*arch_ioremap_caller)(unsigned long, size_t,
				      unsigned int, void *) =
	__arm_ioremap_caller;

void __iomem *
__arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)
{
	return arch_ioremap_caller(phys_addr, size, mtype,
		__builtin_return_address(0));
}
EXPORT_SYMBOL(__arm_ioremap);
/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space as memory. Needed when the kernel wants to execute
 * code in external memory. This is needed for reprogramming source
 * clocks that would affect normal memory for example. Please see
 * CONFIG_GENERIC_ALLOCATOR for allocating external memory.
 */
void __iomem *
__arm_ioremap_exec(unsigned long phys_addr, size_t size, bool cached)
{
	unsigned int mtype;

	if (cached)
		mtype = MT_MEMORY;
	else
		mtype = MT_MEMORY_NONCACHED;

	return __arm_ioremap_caller(phys_addr, size, mtype,
			__builtin_return_address(0));
}
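/*
 * Illustrative sketch only: a platform might use __arm_ioremap_exec() to
 * obtain an executable mapping of on-chip SRAM and run a relocated routine
 * from it while SDRAM is unusable (e.g. during clock reprogramming), as the
 * comment above describes.  The SRAM address and size are made-up values;
 * a real user would relocate code into the mapping with fncpy() and manage
 * the SRAM with a genalloc pool.
 */
static void __iomem * __maybe_unused example_map_sram_exec(void)
{
	/* hypothetical 32K of on-chip SRAM at physical 0x40200000 */
	return __arm_ioremap_exec(0x40200000, SZ_32K, false);
}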
void __iounmap(volatile void __iomem *io_addr)
{
	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
	struct vm_struct *vm;

	read_lock(&vmlist_lock);
	for (vm = vmlist; vm; vm = vm->next) {
		if (vm->addr > addr)
			break;
		if (!(vm->flags & VM_IOREMAP))
			continue;
		/* If this is a static mapping we must leave it alone */
		if ((vm->flags & VM_ARM_STATIC_MAPPING) &&
		    (vm->addr <= addr) && (vm->addr + vm->size > addr)) {
			read_unlock(&vmlist_lock);
			return;
		}
#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
		/*
		 * If this is a section based mapping we need to handle it
		 * specially as the VM subsystem does not know how to handle
		 * such a beast.
		 */
		if ((vm->addr == addr) &&
		    (vm->flags & VM_ARM_SECTION_MAPPING)) {
			unmap_area_sections((unsigned long)vm->addr, vm->size);
			break;
		}
#endif
	}
	read_unlock(&vmlist_lock);

	vunmap(addr);
}
void (*arch_iounmap)(volatile void __iomem *) = __iounmap;

void __arm_iounmap(volatile void __iomem *io_addr)
{
	arch_iounmap(io_addr);
}
EXPORT_SYMBOL(__arm_iounmap);
int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr)
{
	BUG_ON(offset + SZ_64K > IO_SPACE_LIMIT);

	return ioremap_page_range(PCI_IO_VIRT_BASE + offset,
				  PCI_IO_VIRT_BASE + offset + SZ_64K,
				  phys_addr,
				  __pgprot(get_mem_type(MT_DEVICE)->prot_pte));
}
EXPORT_SYMBOL_GPL(pci_ioremap_io);
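/*
 * Illustrative sketch only: a PCI host bridge driver maps each 64K I/O
 * window into the fixed PCI_IO_VIRT_BASE area by calling pci_ioremap_io()
 * with the window's offset and the CPU physical address of the bridge's
 * I/O aperture.  The aperture address below is a made-up value.
 */
static int __maybe_unused example_setup_pci_io(void)
{
	/* first (and here only) 64K window, hypothetical aperture address */
	return pci_ioremap_io(0 * SZ_64K, 0xf0000000);
}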