/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>

#include "physaddr.h"
int page_is_ram(unsigned long pagenr)
{
	resource_size_t addr, end;
	int i;

	/*
	 * A special case is the first 4Kb of memory;
	 * This is a BIOS owned area, not kernel ram, but generally
	 * not listed as such in the E820 table.
	 */
	if (pagenr == 0)
		return 0;

	/*
	 * Second special case: Some BIOSen report the PC BIOS
	 * area (640->1Mb) as ram even though it is not.
	 */
	if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
	    pagenr < (BIOS_END >> PAGE_SHIFT))
		return 0;

	for (i = 0; i < e820.nr_map; i++) {
		/*
		 * Not usable memory:
		 */
		if (e820.map[i].type != E820_RAM)
			continue;
		addr = (e820.map[i].addr + PAGE_SIZE-1) >> PAGE_SHIFT;
		end = (e820.map[i].addr + e820.map[i].size) >> PAGE_SHIFT;

		if ((pagenr >= addr) && (pagenr < end))
			return 1;
	}
	return 0;
}
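/*
 * Worked example (illustrative, not from the original source): with 4k
 * pages, physical address 0xa0000 is pfn 0xa0000 >> PAGE_SHIFT == 0xa0,
 * which lands in the [BIOS_BEGIN, BIOS_END) range checked above, so
 * page_is_ram(0xa0) returns 0 even if a buggy E820 table reports the
 * legacy VGA/BIOS window as RAM.
 */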
/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			       unsigned long prot_val)
{
	unsigned long nrpages = size >> PAGE_SHIFT;
	int err;

	switch (prot_val) {
	case _PAGE_CACHE_UC:
	default:
		err = _set_memory_uc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_WC:
		err = _set_memory_wc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_WB:
		err = _set_memory_wb(vaddr, nrpages);
		break;
	}

	return err;
}
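/*
 * Usage sketch (illustrative): the PAT code uses this helper, via
 * kernel_map_sync_memtype(), to keep the kernel identity mapping's
 * cache attribute in sync when ioremap creates a second, aliasing
 * mapping of the same physical range, roughly:
 *
 *	if (ioremap_change_attr((unsigned long)__va(base), id_sz, flags))
 *		... the aliased mapping cannot be created ...
 */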
/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap_caller(resource_size_t phys_addr,
		unsigned long size, unsigned long prot_val, void *caller)
{
	unsigned long pfn, offset, vaddr;
	resource_size_t last_addr;
	const resource_size_t unaligned_phys_addr = phys_addr;
	const unsigned long unaligned_size = size;
	struct vm_struct *area;
	unsigned long new_prot_val;
	pgprot_t prot;
	int retval;
	void __iomem *ret_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	if (!phys_addr_valid(phys_addr)) {
		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
		       (unsigned long long)phys_addr);
		WARN_ON_ONCE(1);
		return NULL;
	}

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (is_ISA_range(phys_addr, last_addr))
		return (__force void __iomem *)phys_to_virt(phys_addr);

	/*
	 * Check if the request spans more than any BAR in the iomem resource
	 * tree.
	 */
	WARN_ONCE(iomem_map_sanity_check(phys_addr, size),
		  KERN_INFO "Info: mapping multiple BARs. Your kernel is fine.");

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	for (pfn = phys_addr >> PAGE_SHIFT;
				(pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
				pfn++) {

		int is_ram = page_is_ram(pfn);

		if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
			return NULL;
		WARN_ON_ONCE(is_ram);
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
						prot_val, &new_prot_val);
	if (retval) {
		pr_debug("Warning: reserve_memtype returned %d\n", retval);
		return NULL;
	}

	if (prot_val != new_prot_val) {
		/*
		 * Do not fallback to certain memory types with certain
		 * requested type:
		 * - request is uc-, return cannot be write-back
		 * - request is uc-, return cannot be write-combine
		 * - request is write-combine, return cannot be write-back
		 */
		if ((prot_val == _PAGE_CACHE_UC_MINUS &&
		     (new_prot_val == _PAGE_CACHE_WB ||
		      new_prot_val == _PAGE_CACHE_WC)) ||
		    (prot_val == _PAGE_CACHE_WC &&
		     new_prot_val == _PAGE_CACHE_WB)) {
			printk(KERN_ERR
		"ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n",
				(unsigned long long)phys_addr,
				(unsigned long long)(phys_addr + size),
				prot_val, new_prot_val);
			free_memtype(phys_addr, phys_addr + size);
			return NULL;
		}
		prot_val = new_prot_val;
	}

	switch (prot_val) {
	case _PAGE_CACHE_UC:
	default:
		prot = PAGE_KERNEL_IO_NOCACHE;
		break;
	case _PAGE_CACHE_UC_MINUS:
		prot = PAGE_KERNEL_IO_UC_MINUS;
		break;
	case _PAGE_CACHE_WC:
		prot = PAGE_KERNEL_IO_WC;
		break;
	case _PAGE_CACHE_WB:
		prot = PAGE_KERNEL_IO;
		break;
	}

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;

	if (kernel_map_sync_memtype(phys_addr, size, prot_val)) {
		free_memtype(phys_addr, phys_addr + size);
		free_vm_area(area);
		return NULL;
	}

	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
		free_memtype(phys_addr, phys_addr + size);
		free_vm_area(area);
		return NULL;
	}

	ret_addr = (void __iomem *) (vaddr + offset);
	mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

	return ret_addr;
}
/**
 * ioremap_nocache - map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable:
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
	/*
	 * Ideally, this should be:
	 *	pat_enabled ? _PAGE_CACHE_UC : _PAGE_CACHE_UC_MINUS;
	 *
	 * Till we fix all X drivers to use ioremap_wc(), we will use
	 * UC MINUS.
	 */
	unsigned long val = _PAGE_CACHE_UC_MINUS;

	return __ioremap_caller(phys_addr, size, val,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_nocache);
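/*
 * Usage sketch (illustrative only; the PCI device "pdev" and the
 * register offsets are hypothetical):
 *
 *	void __iomem *regs;
 *
 *	regs = ioremap_nocache(pci_resource_start(pdev, 0),
 *			       pci_resource_len(pdev, 0));
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + 0x10);
 *	status = readl(regs + 0x14);
 *	iounmap(regs);
 */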
/**
 * ioremap_wc - map memory into CPU space write combined
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
{
	if (pat_enabled)
		return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
					__builtin_return_address(0));
	else
		return ioremap_nocache(phys_addr, size);
}
EXPORT_SYMBOL(ioremap_wc);
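/*
 * Typical use (illustrative): streaming-write apertures such as frame
 * buffers, where batched write-combining stores are much faster than
 * uncached ones, e.g. a fbdev driver mapping its aperture:
 *
 *	info->screen_base = ioremap_wc(info->fix.smem_start,
 *				       info->fix.smem_len);
 *
 * On non-PAT systems this quietly falls back to ioremap_nocache().
 */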
void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WB,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);
static void __iomem *ioremap_default(resource_size_t phys_addr,
					unsigned long size)
{
	unsigned long flags;
	void __iomem *ret;
	int err;

	/*
	 * - WB for WB-able memory and no other conflicting mappings
	 * - UC_MINUS for non-WB-able memory with no other conflicting mappings
	 * - Inherit from conflicting mappings otherwise
	 */
	err = reserve_memtype(phys_addr, phys_addr + size,
				_PAGE_CACHE_WB, &flags);
	if (err < 0)
		return NULL;

	ret = __ioremap_caller(phys_addr, size, flags,
			       __builtin_return_address(0));

	free_memtype(phys_addr, phys_addr + size);
	return ret;
}
void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
				unsigned long prot_val)
{
	return __ioremap_caller(phys_addr, size, (prot_val & _PAGE_CACHE_MASK),
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_prot);
/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space. So handle that here.
	 */
	if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    (void __force *)addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	mmiotrace_iounmap(addr);

	/* Use the vm area unlocked, assuming the caller
	   ensures there isn't another iounmap for the same address
	   in parallel. Reuse of the virtual address is prevented by
	   leaving it in the global lists until we're done with it.
	   cpa takes care of the direct mappings. */
	read_lock(&vmlist_lock);
	for (p = vmlist; p; p = p->next) {
		if (p->addr == (void __force *)addr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

	/* Finally remove it */
	o = remove_vm_area((void __force *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);
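/*
 * Note (added commentary): the vmlist walk above runs with the lock
 * dropped before remove_vm_area(), so two racing iounmap() calls on the
 * same pointer can both find the area, and the loser trips the BUG_ON().
 * That is why the kernel-doc requires the caller to ensure only one
 * unmapping per pointer.
 */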
/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
void *xlate_dev_mem_ptr(unsigned long phys)
{
	void *addr;
	unsigned long start = phys & PAGE_MASK;

	/* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
	if (page_is_ram(start >> PAGE_SHIFT))
		return __va(phys);

	addr = (void __force *)ioremap_default(start, PAGE_SIZE);
	if (addr)
		addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));

	return addr;
}
void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
	if (page_is_ram(phys >> PAGE_SHIFT))
		return;

	iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
}
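/*
 * Pairing sketch (illustrative, after the pattern used by the /dev/mem
 * read path in drivers/char/mem.c):
 *
 *	ptr = xlate_dev_mem_ptr(p);
 *	if (!ptr)
 *		return -EFAULT;
 *	if (copy_to_user(buf, ptr, sz))
 *		err = -EFAULT;
 *	unxlate_dev_mem_ptr(p, ptr);
 */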
static int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
	early_ioremap_debug = 1;

	return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);
static __initdata int after_paging_init;
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
	/* Don't assume we're using swapper_pg_dir at this point */
	pgd_t *base = __va(read_cr3());
	pgd_t *pgd = &base[pgd_index(addr)];
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pmd;
}
static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}
static unsigned long slot_virt[FIX_BTMAPS_SLOTS] __initdata;
void __init early_ioremap_init(void)
{
	pmd_t *pmd;
	int i;

	if (early_ioremap_debug)
		printk(KERN_INFO "early_ioremap_init()\n");

	for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
		slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	memset(bm_pte, 0, sizeof(bm_pte));
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pmd %p != %p\n",
		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
			FIX_BTMAP_BEGIN);
	}
}
void __init early_ioremap_reset(void)
{
	after_paging_init = 1;
}
static void __init __early_set_fixmap(enum fixed_addresses idx,
				      phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);

	if (pgprot_val(flags))
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	else
		pte_clear(&init_mm, addr, pte);
	__flush_tlb_one(addr);
}
static inline void __init early_set_fixmap(enum fixed_addresses idx,
					   phys_addr_t phys, pgprot_t prot)
{
	if (after_paging_init)
		__set_fixmap(idx, phys, prot);
	else
		__early_set_fixmap(idx, phys, prot);
}
static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
	if (after_paging_init)
		clear_fixmap(idx);
	else
		__early_set_fixmap(idx, 0, __pgprot(0));
}
static void __iomem *prev_map[FIX_BTMAPS_SLOTS] __initdata;
static unsigned long prev_size[FIX_BTMAPS_SLOTS] __initdata;
static int __init check_early_ioremap_leak(void)
{
	int count = 0;
	int i;

	for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
		if (prev_map[i])
			count++;

	if (!count)
		return 0;
	WARN(1, KERN_WARNING
	       "Debug warning: early ioremap leak of %d areas detected.\n",
		count);
	printk(KERN_WARNING
		"please boot with early_ioremap_debug and report the dmesg.\n");

	return 1;
}
late_initcall(check_early_ioremap_leak);
static void __init __iomem *
__early_ioremap(resource_size_t phys_addr, unsigned long size, pgprot_t prot)
{
	unsigned long offset;
	resource_size_t last_addr;
	unsigned int nrpages;
	enum fixed_addresses idx0, idx;
	int i, slot;

	WARN_ON(system_state != SYSTEM_BOOTING);

	slot = -1;
	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
		if (!prev_map[i]) {
			slot = i;
			break;
		}
	}

	if (slot < 0) {
		printk(KERN_INFO "early_ioremap(%08llx, %08lx) not found slot\n",
			 (u64)phys_addr, size);
		WARN_ON(1);
		return NULL;
	}

	if (early_ioremap_debug) {
		printk(KERN_INFO "early_ioremap(%08llx, %08lx) [%d] => ",
		       (u64)phys_addr, size, slot);
		dump_stack();
	}

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr) {
		WARN_ON(1);
		return NULL;
	}

	prev_size[slot] = size;
	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Mappings have to fit in the FIX_BTMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (nrpages > NR_FIX_BTMAPS) {
		WARN_ON(1);
		return NULL;
	}

	/*
	 * Ok, go for it..
	 */
	idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
	idx = idx0;
	while (nrpages > 0) {
		early_set_fixmap(idx, phys_addr, prot);
		phys_addr += PAGE_SIZE;
		--idx;
		--nrpages;
	}
	if (early_ioremap_debug)
		printk(KERN_CONT "%08lx + %08lx\n", offset, slot_virt[slot]);

	prev_map[slot] = (void __iomem *)(offset + slot_virt[slot]);
	return prev_map[slot];
}
/* Remap an IO device */
void __init __iomem *
early_ioremap(resource_size_t phys_addr, unsigned long size)
{
	return __early_ioremap(phys_addr, size, PAGE_KERNEL_IO);
}

/* Remap memory */
void __init __iomem *
early_memremap(resource_size_t phys_addr, unsigned long size)
{
	return __early_ioremap(phys_addr, size, PAGE_KERNEL);
}
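/*
 * Boot-time usage sketch (illustrative; the table pointer and length
 * are hypothetical): firmware data can be inspected before the normal
 * ioremap() machinery is up by borrowing a fixmap slot:
 *
 *	hdr = early_memremap(table_phys, sizeof(*hdr));
 *	if (hdr) {
 *		len = hdr->length;
 *		early_iounmap(hdr, sizeof(*hdr));
 *	}
 */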
void __init early_iounmap(void __iomem *addr, unsigned long size)
{
	unsigned long virt_addr;
	unsigned long offset;
	unsigned int nrpages;
	enum fixed_addresses idx;
	int i, slot;

	slot = -1;
	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
		if (prev_map[i] == addr) {
			slot = i;
			break;
		}
	}

	if (slot < 0) {
		printk(KERN_INFO "early_iounmap(%p, %08lx) not found slot\n",
			 addr, size);
		WARN_ON(1);
		return;
	}

	if (prev_size[slot] != size) {
		printk(KERN_INFO "early_iounmap(%p, %08lx) [%d] size not consistent %08lx\n",
			 addr, size, slot, prev_size[slot]);
		WARN_ON(1);
		return;
	}

	if (early_ioremap_debug) {
		printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
		       size, slot);
		dump_stack();
	}

	virt_addr = (unsigned long)addr;
	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
		WARN_ON(1);
		return;
	}
	offset = virt_addr & ~PAGE_MASK;
	nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;

	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
	while (nrpages > 0) {
		early_clear_fixmap(idx);
		--idx;
		--nrpages;
	}
	prev_map[slot] = NULL;
}