/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>

#include "physaddr.h"

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			unsigned long prot_val)
{
	unsigned long nrpages = size >> PAGE_SHIFT;
	int err;

	switch (prot_val) {
	case _PAGE_CACHE_UC:
	default:
		err = _set_memory_uc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_WC:
		err = _set_memory_wc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_WB:
		err = _set_memory_wb(vaddr, nrpages);
		break;
	}

	return err;
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap_caller(resource_size_t phys_addr,
		unsigned long size, unsigned long prot_val, void *caller)
{
	unsigned long offset, vaddr;
	resource_size_t pfn, last_pfn, last_addr;
	const resource_size_t unaligned_phys_addr = phys_addr;
	const unsigned long unaligned_size = size;
	struct vm_struct *area;
	unsigned long new_prot_val;
	pgprot_t prot;
	int retval;
	void __iomem *ret_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	if (!phys_addr_valid(phys_addr)) {
		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
		       (unsigned long long)phys_addr);
		WARN_ON_ONCE(1);
		return NULL;
	}

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (is_ISA_range(phys_addr, last_addr))
		return (__force void __iomem *)phys_to_virt(phys_addr);

	/*
	 * Check if the request spans more than any BAR in the iomem resource
	 * tree.
	 */
	WARN_ONCE(iomem_map_sanity_check(phys_addr, size),
		  KERN_INFO "Info: mapping multiple BARs. Your kernel is fine.");

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	last_pfn = last_addr >> PAGE_SHIFT;
	for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
		int is_ram = page_is_ram(pfn);

		if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
			return NULL;
		WARN_ON_ONCE(is_ram);
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PHYSICAL_PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
				 prot_val, &new_prot_val);
	if (retval) {
		printk(KERN_ERR "ioremap reserve_memtype failed %d\n", retval);
		return NULL;
	}

	if (prot_val != new_prot_val) {
		if (!is_new_memtype_allowed(phys_addr, size,
					    prot_val, new_prot_val)) {
			printk(KERN_ERR
		"ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n",
			       (unsigned long long)phys_addr,
			       (unsigned long long)(phys_addr + size),
			       prot_val, new_prot_val);
			goto err_free_memtype;
		}
		prot_val = new_prot_val;
	}

	switch (prot_val) {
	case _PAGE_CACHE_UC:
	default:
		prot = PAGE_KERNEL_IO_NOCACHE;
		break;
	case _PAGE_CACHE_UC_MINUS:
		prot = PAGE_KERNEL_IO_UC_MINUS;
		break;
	case _PAGE_CACHE_WC:
		prot = PAGE_KERNEL_IO_WC;
		break;
	case _PAGE_CACHE_WB:
		prot = PAGE_KERNEL_IO;
		break;
	}

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		goto err_free_memtype;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;

	if (kernel_map_sync_memtype(phys_addr, size, prot_val))
		goto err_free_area;

	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot))
		goto err_free_area;

	ret_addr = (void __iomem *) (vaddr + offset);
	mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

	return ret_addr;
err_free_area:
	free_vm_area(area);
err_free_memtype:
	free_memtype(phys_addr, phys_addr + size);
	return NULL;
}

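/*
 * Worked example (illustrative, made-up numbers): a request for
 * phys_addr = 0xfd001234 and size = 0x20 yields offset = 0x234, a
 * page-aligned phys_addr of 0xfd001000, a rounded size of
 * PAGE_ALIGN(last_addr + 1) - 0xfd001000 = 0x1000, and the caller gets
 * back vaddr + 0x234, which is the "small detail" mentioned in the NOTE
 * above __ioremap_caller().
 */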

/**
 * ioremap_nocache     -   map bus memory into CPU space
 * @phys_addr:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
	/*
	 * Ideally, this should be:
	 *	pat_enabled ? _PAGE_CACHE_UC : _PAGE_CACHE_UC_MINUS;
	 *
	 * Till we fix all X drivers to use ioremap_wc(), we will use
	 * UC MINUS.
	 */
	unsigned long val = _PAGE_CACHE_UC_MINUS;

	return __ioremap_caller(phys_addr, size, val,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_nocache);

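/*
 * Usage sketch (illustrative only; the PCI device "pdev", the register
 * offsets and the error handling are hypothetical):
 *
 *	void __iomem *regs;
 *	u32 status;
 *
 *	regs = ioremap_nocache(pci_resource_start(pdev, 0),
 *			       pci_resource_len(pdev, 0));
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(0x1, regs + 0x10);
 *	status = readl(regs + 0x14);
 *	iounmap(regs);
 *
 * All accesses must go through readl()/writel() and the other mmio
 * helpers; the returned cookie is not necessarily a plain pointer.
 */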

/**
 * ioremap_wc	-	map memory into CPU space write combined
 * @phys_addr:	bus address of the memory
 * @size:	size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
{
	if (pat_enabled)
		return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
					__builtin_return_address(0));
	else
		return ioremap_nocache(phys_addr, size);
}
EXPORT_SYMBOL(ioremap_wc);

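/*
 * Usage sketch (illustrative; the BAR number and aperture size are made
 * up): write-combined mappings suit large, write-mostly apertures such as
 * framebuffers, where combined/buffered writes are acceptable:
 *
 *	void __iomem *fb;
 *
 *	fb = ioremap_wc(pci_resource_start(pdev, 1),
 *			pci_resource_len(pdev, 1));
 *	if (fb)
 *		memset_io(fb, 0, pci_resource_len(pdev, 1));
 *
 * Control registers that rely on strict ordering should keep using
 * ioremap_nocache() instead.
 */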

void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WB,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);

void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
			   unsigned long prot_val)
{
	return __ioremap_caller(phys_addr, size, (prot_val & _PAGE_CACHE_MASK),
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_prot);

/**
 * iounmap - Free a IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space. So handle that here.
	 */
	if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    (void __force *)addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	mmiotrace_iounmap(addr);

	/* Use the vm area unlocked, assuming the caller
	   ensures there isn't another iounmap for the same address
	   in parallel. Reuse of the virtual address is prevented by
	   leaving it in the global lists until we're done with it.
	   cpa takes care of the direct mappings. */
	read_lock(&vmlist_lock);
	for (p = vmlist; p; p = p->next) {
		if (p->addr == (void __force *)addr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

	/* Finally remove it */
	o = remove_vm_area((void __force *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
void *xlate_dev_mem_ptr(unsigned long phys)
{
	void *addr;
	unsigned long start = phys & PAGE_MASK;

	/* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
	if (page_is_ram(start >> PAGE_SHIFT))
		return __va(phys);

	addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
	if (addr)
		addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));

	return addr;
}

void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
	if (page_is_ram(phys >> PAGE_SHIFT))
		return;

	iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
}

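/*
 * Usage sketch (illustrative, modelled loosely on the /dev/mem read path;
 * locking, user-copy and error handling are omitted):
 *
 *	void *ptr = xlate_dev_mem_ptr(p);
 *	if (!ptr)
 *		return -EFAULT;
 *	... copy data out of ptr ...
 *	unxlate_dev_mem_ptr(p, ptr);
 *
 * For non-RAM pages xlate_dev_mem_ptr() takes a temporary ioremap_cache()
 * mapping, so every successful xlate must be paired with an unxlate.
 */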

static int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
	early_ioremap_debug = 1;

	return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

static __initdata int after_paging_init;
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
	/* Don't assume we're using swapper_pg_dir at this point */
	pgd_t *base = __va(read_cr3());
	pgd_t *pgd = &base[pgd_index(addr)];
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

bool __init is_early_ioremap_ptep(pte_t *ptep)
{
	return ptep >= &bm_pte[0] && ptep < &bm_pte[PAGE_SIZE/sizeof(pte_t)];
}

static unsigned long slot_virt[FIX_BTMAPS_SLOTS] __initdata;

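/*
 * Each early-ioremap slot owns NR_FIX_BTMAPS consecutive pages carved out
 * of the FIX_BTMAP_BEGIN..FIX_BTMAP_END fixmap range.  slot_virt[] caches
 * the virtual base address of each slot (filled in by early_ioremap_init()
 * below) so that __early_ioremap() can hand out "slot base + offset"
 * pointers directly.  Typically this amounts to 4 slots of 64 pages
 * (256 kB each) on x86, but the authoritative constants live in
 * asm/fixmap.h.
 */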

void __init early_ioremap_init(void)
{
	pmd_t *pmd;
	int i;

	if (early_ioremap_debug)
		printk(KERN_INFO "early_ioremap_init()\n");

	for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
		slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	memset(bm_pte, 0, sizeof(bm_pte));
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
#define __FIXADDR_TOP (-PAGE_SIZE)
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
#undef __FIXADDR_TOP
	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pmd %p != %p\n",
		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
		       fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
		       fix_to_virt(FIX_BTMAP_END));
		printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
		       FIX_BTMAP_BEGIN);
	}
}

void __init early_ioremap_reset(void)
{
	after_paging_init = 1;
}

static void __init __early_set_fixmap(enum fixed_addresses idx,
				      phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);

	if (pgprot_val(flags))
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	else
		pte_clear(&init_mm, addr, pte);
	__flush_tlb_one(addr);
}

static inline void __init early_set_fixmap(enum fixed_addresses idx,
					   phys_addr_t phys, pgprot_t prot)
{
	if (after_paging_init)
		__set_fixmap(idx, phys, prot);
	else
		__early_set_fixmap(idx, phys, prot);
}

static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
	if (after_paging_init)
		clear_fixmap(idx);
	else
		__early_set_fixmap(idx, 0, __pgprot(0));
}

static void __iomem *prev_map[FIX_BTMAPS_SLOTS] __initdata;
static unsigned long prev_size[FIX_BTMAPS_SLOTS] __initdata;

void __init fixup_early_ioremap(void)
{
	int i;

	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
		if (prev_map[i]) {
			WARN_ON(1);
			break;
		}
	}

	early_ioremap_init();
}

static int __init check_early_ioremap_leak(void)
{
	int count = 0;
	int i;

	for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
		if (prev_map[i])
			count++;

	if (!count)
		return 0;
	WARN(1, KERN_WARNING
	     "Debug warning: early ioremap leak of %d areas detected.\n",
	     count);
	printk(KERN_WARNING
	       "please boot with early_ioremap_debug and report the dmesg.\n");

	return 1;
}
late_initcall(check_early_ioremap_leak);

static void __init __iomem *
__early_ioremap(resource_size_t phys_addr, unsigned long size, pgprot_t prot)
{
	unsigned long offset;
	resource_size_t last_addr;
	unsigned int nrpages;
	enum fixed_addresses idx0, idx;
	int i, slot;

	WARN_ON(system_state != SYSTEM_BOOTING);

	slot = -1;
	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
		if (!prev_map[i]) {
			slot = i;
			break;
		}
	}

	if (slot < 0) {
		printk(KERN_INFO "early_ioremap(%08llx, %08lx) not found slot\n",
		       (u64)phys_addr, size);
		WARN_ON(1);
		return NULL;
	}

	if (early_ioremap_debug) {
		printk(KERN_INFO "early_ioremap(%08llx, %08lx) [%d] => ",
		       (u64)phys_addr, size, slot);
		dump_stack();
	}

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr) {
		WARN_ON(1);
		return NULL;
	}

	prev_size[slot] = size;
	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Mappings have to fit in the FIX_BTMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (nrpages > NR_FIX_BTMAPS) {
		WARN_ON(1);
		return NULL;
	}

	/*
	 * Ok, go for it..
	 */
	idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
	idx = idx0;
	while (nrpages > 0) {
		early_set_fixmap(idx, phys_addr, prot);
		phys_addr += PAGE_SIZE;
		--idx;
		--nrpages;
	}
	if (early_ioremap_debug)
		printk(KERN_CONT "%08lx + %08lx\n", offset, slot_virt[slot]);

	prev_map[slot] = (void __iomem *)(offset + slot_virt[slot]);
	return prev_map[slot];
}

/* Remap an IO device */
void __init __iomem *
early_ioremap(resource_size_t phys_addr, unsigned long size)
{
	return __early_ioremap(phys_addr, size, PAGE_KERNEL_IO);
}

void __init __iomem *
early_memremap(resource_size_t phys_addr, unsigned long size)
{
	return __early_ioremap(phys_addr, size, PAGE_KERNEL);
}

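/*
 * Usage sketch (illustrative; the table address and length stand in for a
 * firmware structure discovered earlier in boot): before the normal
 * ioremap() machinery is up, callers such as ACPI/DMI table parsers do
 *
 *	p = early_memremap(table_phys, table_len);
 *	... parse the table ...
 *	early_iounmap(p, table_len);
 *
 * The mapping comes out of one of the FIX_BTMAPS_SLOTS windows above and
 * must be released with early_iounmap() using the same size.
 */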

void __init early_iounmap(void __iomem *addr, unsigned long size)
{
	unsigned long virt_addr;
	unsigned long offset;
	unsigned int nrpages;
	enum fixed_addresses idx;
	int i, slot;

	slot = -1;
	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
		if (prev_map[i] == addr) {
			slot = i;
			break;
		}
	}

	if (slot < 0) {
		printk(KERN_INFO "early_iounmap(%p, %08lx) not found slot\n",
		       addr, size);
		WARN_ON(1);
		return;
	}

	if (prev_size[slot] != size) {
		printk(KERN_INFO "early_iounmap(%p, %08lx) [%d] size not consistent %08lx\n",
		       addr, size, slot, prev_size[slot]);
		WARN_ON(1);
		return;
	}

	if (early_ioremap_debug) {
		printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
		       size, slot);
		dump_stack();
	}

	virt_addr = (unsigned long)addr;
	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
		WARN_ON(1);
		return;
	}
	offset = virt_addr & ~PAGE_MASK;
	nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;

	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
	while (nrpages > 0) {
		early_clear_fixmap(idx);
		--idx;
		--nrpages;
	}
	prev_map[slot] = NULL;
}