/*
 * linux/arch/unicore32/mm/ioremap.c
 *
 * Code specific to PKUnity SoC and UniCore ISA
 *
 * Copyright (C) 2001-2010 GUAN Xue-tao
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *
 * Re-map IO memory to kernel address space so that we can access it.
 *
 * This allows a driver to remap an arbitrary region of bus memory into
 * virtual space.  One should *only* use readl, writel, memcpy_toio and
 * so on with such remapped areas.
 *
 * Because UniCore only has a 32-bit address space we can't address the
 * whole of the (physical) PCI space at once.  PCI huge-mode addressing
 * allows us to circumvent this restriction by splitting PCI space into
 * two 2GB chunks and mapping only one at a time into processor memory.
 * We use MMU protection domains to trap any attempt to access the bank
 * that is not currently mapped.  (This isn't fully implemented yet.)
 */
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/io.h>

#include <asm/cputype.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/sizes.h>

#include <mach/map.h>
#include "mm.h"
/*
 * Used by ioremap() and iounmap() code to mark (super)section-mapped
 * I/O regions in vm_struct->flags field.
 */
#define VM_UNICORE_SECTION_MAPPING	0x80000000
int ioremap_page(unsigned long virt, unsigned long phys,
		 const struct mem_type *mtype)
{
	return ioremap_page_range(virt, virt + PAGE_SIZE, phys,
				  __pgprot(mtype->prot_pte));
}
EXPORT_SYMBOL(ioremap_page);
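
/*
 * Illustrative use (not part of the original file): a caller that
 * already owns a suitable page-aligned virtual address could install a
 * single device page with, e.g.,
 *
 *	ioremap_page(vaddr, paddr, get_mem_type(MT_DEVICE));
 *
 * where vaddr and paddr are hypothetical page-aligned addresses.
 */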
/*
 * Section support is unsafe on SMP - If you iounmap and ioremap a region,
 * the other CPUs will not see this change until their next context switch.
 * Meanwhile, (eg) if an interrupt comes in on one of those other CPUs
 * which requires the new ioremap'd region to be referenced, the CPU will
 * reference the _old_ region.
 *
 * Note that get_vm_area_caller() allocates a guard 4K page, so we need to
 * mask the size back to 4MB aligned or we will overflow in the loop below.
 */
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
	unsigned long addr = virt, end = virt + (size & ~(SZ_4M - 1));
	pgd_t *pgd;

	flush_cache_vunmap(addr, end);
	pgd = pgd_offset_k(addr);
	do {
		pmd_t pmd, *pmdp = pmd_offset((pud_t *)pgd, addr);

		pmd = *pmdp;
		if (!pmd_none(pmd)) {
			/*
			 * Clear the PMD from the page table, and
			 * increment the kvm sequence so others
			 * notice this change.
			 *
			 * Note: this is still racy on SMP machines.
			 */
			pmd_clear(pmdp);

			/*
			 * Free the page table, if there was one.
			 */
			if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
				pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
		}

		addr += PGDIR_SIZE;
		pgd++;
	} while (addr < end);

	flush_tlb_kernel_range(virt, end);
}
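
/*
 * Worked example of the guard-page masking above (figures assumed, derived
 * from the comment before unmap_area_sections): for a 4MB mapping,
 * get_vm_area_caller() reserves 4MB plus the 4K guard page, so an unmap
 * may see size == 0x401000.  Unmasked, end would overshoot and the
 * 4MB-stepping loop would clear one PMD too many;
 * size & ~(SZ_4M - 1) == 0x400000 trims it back to exactly one pass.
 */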
static int
remap_area_sections(unsigned long virt, unsigned long pfn,
		    size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pgd_t *pgd;

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	pgd = pgd_offset_k(addr);
	do {
		pmd_t *pmd = pmd_offset((pud_t *)pgd, addr);

		set_pmd(pmd, __pmd(__pfn_to_phys(pfn) | type->prot_sect));
		pfn += SZ_4M >> PAGE_SHIFT;
		flush_pmd_entry(pmd);

		addr += PGDIR_SIZE;
		pgd++;
	} while (addr < end);

	return 0;
}
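
/*
 * Sizing sketch (illustrative figures): with 4K pages,
 * SZ_4M >> PAGE_SHIFT == 1024, so each pass of the loop above maps one
 * 4MB section and advances pfn by 1024 page frames; a section-aligned
 * 8MB region thus costs two set_pmd() calls rather than 2048 PTEs.
 */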
void __iomem *__uc32_ioremap_pfn_caller(unsigned long pfn,
	unsigned long offset, size_t size, unsigned int mtype, void *caller)
{
	const struct mem_type *type;
	int err;
	unsigned long addr;
	struct vm_struct *area;

	/*
	 * High mappings must be section aligned
	 */
	if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SECTION_MASK))
		return NULL;

	/*
	 * Don't allow RAM to be mapped
	 */
	if (pfn_valid(pfn)) {
		printk(KERN_WARNING "BUG: Your driver calls ioremap() on\n"
			"system memory.  This leads to architecturally\n"
			"unpredictable behaviour, and ioremap() will fail in\n"
			"the next kernel release. Please fix your driver.\n");
		WARN_ON(1);
	}

	type = get_mem_type(mtype);
	if (!type)
		return NULL;

	/*
	 * Page align the mapping size, taking account of any offset.
	 */
	size = PAGE_ALIGN(offset + size);

	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		return NULL;
	addr = (unsigned long)area->addr;

	if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
		area->flags |= VM_UNICORE_SECTION_MAPPING;
		err = remap_area_sections(addr, pfn, size, type);
	} else
		err = ioremap_page_range(addr, addr + size, __pfn_to_phys(pfn),
					 __pgprot(type->prot_pte));

	if (err) {
		vunmap((void *)addr);
		return NULL;
	}

	flush_cache_vmap(addr, addr + size);
	return (void __iomem *) (offset + addr);
}
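
/*
 * The section-mapping test above demands common 4MB alignment of the
 * physical address, the size and the virtual address.  An assumed
 * example: pfn 0x40000 (phys 0x40000000) with size 0x400000 and a
 * 4MB-aligned area->addr passes and takes the remap_area_sections()
 * path; a 4K offset in any of the three falls back to
 * ioremap_page_range().
 */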
void __iomem *__uc32_ioremap_caller(unsigned long phys_addr, size_t size,
	unsigned int mtype, void *caller)
{
	unsigned long last_addr;
	unsigned long offset = phys_addr & ~PAGE_MASK;
	unsigned long pfn = __phys_to_pfn(phys_addr);

	/*
	 * Don't allow wraparound or zero size
	 */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	return __uc32_ioremap_pfn_caller(pfn, offset, size, mtype, caller);
}
/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *
__uc32_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
		   unsigned int mtype)
{
	return __uc32_ioremap_pfn_caller(pfn, offset, size, mtype,
			__builtin_return_address(0));
}
EXPORT_SYMBOL(__uc32_ioremap_pfn);
void __iomem *
__uc32_ioremap(unsigned long phys_addr, size_t size)
{
	return __uc32_ioremap_caller(phys_addr, size, MT_DEVICE,
			__builtin_return_address(0));
}
EXPORT_SYMBOL(__uc32_ioremap);
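
/*
 * A minimal usage sketch (illustrative only; the physical base address
 * and register offsets below are made-up placeholders, not a real
 * PKUnity register block):
 */
#if 0	/* example, not built */
static int example_init(void)
{
	void __iomem *regs;

	regs = __uc32_ioremap(0xee000000, SZ_4K);	/* hypothetical base */
	if (!regs)
		return -ENOMEM;

	writel(0x1, regs + 0x0);	/* only read*(), write*() on the cookie */
	(void)readl(regs + 0x4);

	__uc32_iounmap(regs);
	return 0;
}
#endif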
void __iomem *
__uc32_ioremap_cached(unsigned long phys_addr, size_t size)
{
	return __uc32_ioremap_caller(phys_addr, size, MT_DEVICE_CACHED,
			__builtin_return_address(0));
}
EXPORT_SYMBOL(__uc32_ioremap_cached);
void __uc32_iounmap(volatile void __iomem *io_addr)
{
	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
	struct vm_struct **p, *tmp;

	/*
	 * If this is a section based mapping we need to handle it
	 * specially as the VM subsystem does not know how to handle
	 * such a beast. We need the lock here b/c we need to clear
	 * all the mappings before the area can be reclaimed
	 * by someone else.
	 */
	write_lock(&vmlist_lock);
	for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
		if ((tmp->flags & VM_IOREMAP) && (tmp->addr == addr)) {
			if (tmp->flags & VM_UNICORE_SECTION_MAPPING) {
				unmap_area_sections((unsigned long)tmp->addr,
						    tmp->size);
			}
			break;
		}
	}
	write_unlock(&vmlist_lock);

	vunmap(addr);
}
EXPORT_SYMBOL(__uc32_iounmap);