/*
 * This file contains ioremap and related functions for 64-bit machines.
 *
 * Derived from arch/ppc64/mm/init.c
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@samba.org)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *   Copyright (C) 1996 Paul Mackerras
 *
 * Derived from "arch/i386/mm/init.c"
 *   Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * Dave Engebretsen <engebret@us.ibm.com>
 *   Rework for PPC64 port.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/slab.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/firmware.h>

#include "mmu_decl.h"

/* Some sanity checking */
#if TASK_SIZE_USER64 > PGTABLE_RANGE
#error TASK_SIZE_USER64 exceeds pagetable range
#endif

#ifdef CONFIG_PPC_STD_MMU_64
#if TASK_SIZE_USER64 > (1UL << (USER_ESID_BITS + SID_SHIFT))
#error TASK_SIZE_USER64 exceeds user VSID range
#endif
#endif

unsigned long ioremap_bot = IOREMAP_BASE;

#ifdef CONFIG_PPC_MMU_NOHASH
static void *early_alloc_pgtable(unsigned long size)
{
	void *pt;

	if (init_bootmem_done)
		pt = __alloc_bootmem(size, size, __pa(MAX_DMA_ADDRESS));
	else
		pt = __va(memblock_alloc_base(size, size,
					      __pa(MAX_DMA_ADDRESS)));
	memset(pt, 0, size);

	return pt;
}
#endif /* CONFIG_PPC_MMU_NOHASH */

/*
 * map_kernel_page is currently only called by __ioremap. It adds an
 * entry to the ioremap page table and an entry to the HPT, possibly
 * bolting it.
 */
int map_kernel_page(unsigned long ea, unsigned long pa, int flags)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	if (slab_is_available()) {
		pgdp = pgd_offset_k(ea);
		pudp = pud_alloc(&init_mm, pgdp, ea);
		if (!pudp)
			return -ENOMEM;
		pmdp = pmd_alloc(&init_mm, pudp, ea);
		if (!pmdp)
			return -ENOMEM;
		ptep = pte_alloc_kernel(pmdp, ea);
		if (!ptep)
			return -ENOMEM;
		set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
						       __pgprot(flags)));
	} else {
#ifdef CONFIG_PPC_MMU_NOHASH
		/* Warning! This will blow up if bootmem is not initialized,
		 * which our ppc64 code is keen to do. We'll need to fix it
		 * and/or be more careful.
		 */
		pgdp = pgd_offset_k(ea);
#ifdef PUD_TABLE_SIZE
		if (pgd_none(*pgdp)) {
			pudp = early_alloc_pgtable(PUD_TABLE_SIZE);
			BUG_ON(pudp == NULL);
			pgd_populate(&init_mm, pgdp, pudp);
		}
#endif /* PUD_TABLE_SIZE */
		pudp = pud_offset(pgdp, ea);
		if (pud_none(*pudp)) {
			pmdp = early_alloc_pgtable(PMD_TABLE_SIZE);
			BUG_ON(pmdp == NULL);
			pud_populate(&init_mm, pudp, pmdp);
		}
		pmdp = pmd_offset(pudp, ea);
		if (!pmd_present(*pmdp)) {
			ptep = early_alloc_pgtable(PAGE_SIZE);
			BUG_ON(ptep == NULL);
			pmd_populate_kernel(&init_mm, pmdp, ptep);
		}
		ptep = pte_offset_kernel(pmdp, ea);
		set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
						       __pgprot(flags)));
#else /* CONFIG_PPC_MMU_NOHASH */
		/*
		 * If the mm subsystem is not fully up, we cannot create a
		 * linux page table entry for this mapping. Simply bolt an
		 * entry in the hardware page table.
		 */
		if (htab_bolt_mapping(ea, ea + PAGE_SIZE, pa, flags,
				      mmu_io_psize, mmu_kernel_ssize)) {
			printk(KERN_ERR "Failed to do bolted mapping IO "
			       "memory at %016lx !\n", pa);
			return -ENOMEM;
		}
#endif /* !CONFIG_PPC_MMU_NOHASH */
	}
	return 0;
}

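/*
 * Illustrative sketch, not part of the original file: early platform
 * code needing a single bolted IO page could call this directly. The
 * physical address below is hypothetical, both addresses must be
 * page-aligned, and the flag combination is an assumption that mirrors
 * what __ioremap_at() establishes for IO mappings:
 *
 *	if (map_kernel_page(ISA_IO_BASE, 0x80000000ul,
 *			    pgprot_val(PAGE_KERNEL) | _PAGE_NO_CACHE |
 *			    _PAGE_GUARDED))
 *		pr_err("bolting IO page failed\n");
 *
 * A non-zero return (-ENOMEM) means a page table level could not be
 * allocated, or that HPT bolting failed on hash MMUs.
 */
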
/**
 * __ioremap_at - Low level function to establish the page tables
 *                for an IO mapping
 */
void __iomem * __ioremap_at(phys_addr_t pa, void *ea, unsigned long size,
			    unsigned long flags)
{
	unsigned long i;

	/* Make sure we have the base flags */
	if ((flags & _PAGE_PRESENT) == 0)
		flags |= pgprot_val(PAGE_KERNEL);

	/* Non-cacheable page cannot be coherent */
	if (flags & _PAGE_NO_CACHE)
		flags &= ~_PAGE_COHERENT;

	/* We don't support the 4K PFN hack with ioremap */
	if (flags & _PAGE_4K_PFN)
		return NULL;

	WARN_ON(pa & ~PAGE_MASK);
	WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
	WARN_ON(size & ~PAGE_MASK);

	for (i = 0; i < size; i += PAGE_SIZE)
		if (map_kernel_page((unsigned long)ea + i, pa + i, flags))
			return NULL;

	return (void __iomem *)ea;
}

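/*
 * Illustrative sketch only (not from the original file): platform code
 * that manages a PCI IO window manually might establish a mapping into
 * a reserved virtual range like this; phb_io_phys, phb_io_virt and
 * phb_io_size are hypothetical names for a host bridge's IO window,
 * and all three must be page-aligned as the WARN_ONs above enforce:
 *
 *	void __iomem *va;
 *
 *	va = __ioremap_at(phb_io_phys, (void *)phb_io_virt, phb_io_size,
 *			  _PAGE_NO_CACHE | _PAGE_GUARDED);
 *	if (va == NULL)
 *		return -ENOMEM;
 */
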
/**
 * __iounmap_at - Low level function to tear down the page tables
 *                for an IO mapping. This is used for mappings that
 *                are manipulated manually, like partial unmapping of
 *                PCI IOs or ISA space.
 */
void __iounmap_at(void *ea, unsigned long size)
{
	WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
	WARN_ON(size & ~PAGE_MASK);

	unmap_kernel_range((unsigned long)ea, size);
}

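/*
 * Illustrative sketch only: tearing down the hypothetical mapping from
 * the example above is symmetric, and may cover just part of the
 * originally mapped range (e.g. when a host bridge is removed):
 *
 *	__iounmap_at((void *)phb_io_virt, phb_io_size);
 */
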
void __iomem * __ioremap_caller(phys_addr_t addr, unsigned long size,
				unsigned long flags, void *caller)
{
	phys_addr_t paligned;
	void __iomem *ret;

	/*
	 * Choose an address to map it to.
	 * Once the imalloc system is running, we use it.
	 * Before that, we map using addresses going
	 * up from ioremap_bot. imalloc will use
	 * the addresses from ioremap_bot through
	 * IMALLOC_END
	 */
	paligned = addr & PAGE_MASK;
	size = PAGE_ALIGN(addr + size) - paligned;

	if ((size == 0) || (paligned == 0))
		return NULL;

	if (mem_init_done) {
		struct vm_struct *area;

		area = __get_vm_area_caller(size, VM_IOREMAP,
					    ioremap_bot, IOREMAP_END,
					    caller);
		if (area == NULL)
			return NULL;

		area->phys_addr = paligned;
		ret = __ioremap_at(paligned, area->addr, size, flags);
		if (!ret)
			vunmap(area->addr);
	} else {
		ret = __ioremap_at(paligned, (void *)ioremap_bot, size, flags);
		if (ret)
			ioremap_bot += size;
	}

	if (ret)
		ret += addr & ~PAGE_MASK;
	return ret;
}

void __iomem * __ioremap(phys_addr_t addr, unsigned long size,
			 unsigned long flags)
{
	return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
}

void __iomem * ioremap(phys_addr_t addr, unsigned long size)
{
	unsigned long flags = _PAGE_NO_CACHE | _PAGE_GUARDED;
	void *caller = __builtin_return_address(0);

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, flags, caller);
	return __ioremap_caller(addr, size, flags, caller);
}

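/*
 * Illustrative driver-side usage, not part of this file; the device
 * base address, size and register offset are hypothetical:
 *
 *	void __iomem *regs;
 *
 *	regs = ioremap(0xfe000000, 0x1000);
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(0x1, regs + 0x10);
 *	iounmap(regs);
 *
 * The guarded, non-cached flags applied here make this the right call
 * for device registers, as opposed to ioremap_wc() below for
 * write-combined framebuffer-style mappings.
 */
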
void __iomem * ioremap_wc(phys_addr_t addr, unsigned long size)
{
	unsigned long flags = _PAGE_NO_CACHE;
	void *caller = __builtin_return_address(0);

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, flags, caller);
	return __ioremap_caller(addr, size, flags, caller);
}

void __iomem * ioremap_prot(phys_addr_t addr, unsigned long size,
			    unsigned long flags)
{
	void *caller = __builtin_return_address(0);

	/* writeable implies dirty for kernel addresses */
	if (flags & _PAGE_RW)
		flags |= _PAGE_DIRTY;

	/* we don't want to let _PAGE_USER and _PAGE_EXEC leak out */
	flags &= ~(_PAGE_USER | _PAGE_EXEC);

#ifdef _PAGE_BAP_SR
	/* _PAGE_USER contains _PAGE_BAP_SR on BookE using the new PTE format
	 * which means that we just cleared supervisor access... oops ;-) This
	 * restores it
	 */
	flags |= _PAGE_BAP_SR;
#endif

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, flags, caller);
	return __ioremap_caller(addr, size, flags, caller);
}

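/*
 * Illustrative only: a caller wanting a non-default protection can
 * pass raw PTE flags; the choice of a read-only kernel protection
 * below is an assumption, not taken from this file:
 *
 *	void __iomem *p;
 *
 *	p = ioremap_prot(addr, size, pgprot_val(PAGE_KERNEL_RO));
 *
 * Whatever is passed, the _PAGE_USER and _PAGE_EXEC bits are stripped
 * above, so user or exec permissions cannot leak into the mapping.
 */
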
/*
 * Unmap an IO region and remove it from imalloc'd list.
 * Access to IO memory should be serialized by driver.
 */
void __iounmap(volatile void __iomem *token)
{
	void *addr;

	if (!mem_init_done)
		return;

	addr = (void *) ((unsigned long __force)
			 PCI_FIX_ADDR(token) & PAGE_MASK);
	if ((unsigned long)addr < ioremap_bot) {
		printk(KERN_WARNING "Attempt to iounmap early bolted mapping"
		       " at 0x%p\n", addr);
		return;
	}
	vunmap(addr);
}

void iounmap(volatile void __iomem *token)
{
	if (ppc_md.iounmap)
		ppc_md.iounmap(token);
	else
		__iounmap(token);
}

EXPORT_SYMBOL(ioremap);
EXPORT_SYMBOL(ioremap_wc);
EXPORT_SYMBOL(ioremap_prot);
EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(__ioremap_at);
EXPORT_SYMBOL(iounmap);
EXPORT_SYMBOL(__iounmap);
EXPORT_SYMBOL(__iounmap_at);