/*
 * (c) Copyright 2006, 2007 Hewlett-Packard Development Company, L.P.
 *	Bjorn Helgaas <bjorn.helgaas@hp.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/efi.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <asm/io.h>
#include <asm/meminit.h>
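
/*
 * All ia64 uncached mappings live in region 6, an identity-mapped
 * address space with the UC attribute.  ORing the physical address
 * into that region yields an uncached pointer without any page tables.
 */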
static inline void __iomem *
__ioremap_uc(unsigned long phys_addr)
{
	return (void __iomem *) (__IA64_UNCACHED_OFFSET | phys_addr);
}
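
/*
 * Early mapping, usable before the VM subsystem is initialized.  If
 * the kernel maps this range write-back (WB), return the cached
 * identity mapping; otherwise hand back an uncached pointer.
 */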
void __iomem *
early_ioremap (unsigned long phys_addr, unsigned long size)
{
	u64 attr;
	attr = kern_mem_attribute(phys_addr, size);
	if (attr & EFI_MEMORY_WB)
		return (void __iomem *) phys_to_virt(phys_addr);
	return __ioremap_uc(phys_addr);
}
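
/*
 * Attribute selection, in order of preference:
 *   1. Memory in kern_memmap keeps the kernel's own attribute
 *      (WB identity mapping or UC region 6 pointer).
 *   2. WB for the whole granule: use the region 7 identity mapping.
 *   3. WB for the covering pages only: build a page-table mapping
 *      in the vmalloc area.
 *   4. Otherwise, fall back to an uncached region 6 pointer.
 */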
void __iomem *
ioremap (unsigned long phys_addr, unsigned long size)
{
	void __iomem *addr;
	struct vm_struct *area;
	unsigned long offset;
	pgprot_t prot;
	u64 attr;
	unsigned long gran_base, gran_size;
	unsigned long page_base;

	/*
	 * For things in kern_memmap, we must use the same attribute
	 * as the rest of the kernel.  For more details, see
	 * Documentation/ia64/aliasing.txt.
	 */
	attr = kern_mem_attribute(phys_addr, size);
	if (attr & EFI_MEMORY_WB)
		return (void __iomem *) phys_to_virt(phys_addr);
	else if (attr & EFI_MEMORY_UC)
		return __ioremap_uc(phys_addr);

	/*
	 * Some chipsets don't support UC access to memory.  If
	 * WB is supported for the whole granule, we prefer that.
	 */
	gran_base = GRANULEROUNDDOWN(phys_addr);
	gran_size = GRANULEROUNDUP(phys_addr + size) - gran_base;
	if (efi_mem_attribute(gran_base, gran_size) & EFI_MEMORY_WB)
		return (void __iomem *) phys_to_virt(phys_addr);

	/*
	 * WB is not supported for the whole granule, so we can't use
	 * the region 7 identity mapping.  If we can safely cover the
	 * area with kernel page table mappings, we can use those
	 * instead.
	 */
	page_base = phys_addr & PAGE_MASK;
	size = PAGE_ALIGN(phys_addr + size) - page_base;
	if (efi_mem_attribute(page_base, size) & EFI_MEMORY_WB) {
		prot = PAGE_KERNEL;

		/*
		 * Mappings have to be page-aligned
		 */
		offset = phys_addr & ~PAGE_MASK;
		phys_addr &= PAGE_MASK;

		area = get_vm_area(size, VM_IOREMAP);
		if (!area)
			return NULL;

		area->phys_addr = phys_addr;
		addr = (void __iomem *) area->addr;
		if (ioremap_page_range((unsigned long) addr,
				(unsigned long) addr + size, phys_addr, prot)) {
			vunmap((void __force *) addr);
			return NULL;
		}

		return (void __iomem *) (offset + (char __iomem *)addr);
	}

	return __ioremap_uc(phys_addr);
}
EXPORT_SYMBOL(ioremap);
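
/*
 * Refuse to create an uncached alias for memory the kernel maps
 * write-back; mixed attributes risk a machine check on ia64 (see
 * Documentation/ia64/aliasing.txt).
 */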
void __iomem *
ioremap_nocache (unsigned long phys_addr, unsigned long size)
{
	if (kern_mem_attribute(phys_addr, size) & EFI_MEMORY_WB)
		return NULL;

	return __ioremap_uc(phys_addr);
}
EXPORT_SYMBOL(ioremap_nocache);
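
/*
 * early_ioremap() only returns identity-mapped or region 6 pointers,
 * so there is nothing to tear down here.
 */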
void
early_iounmap (volatile void __iomem *addr, unsigned long size)
{
}
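
/*
 * Only page-table mappings need teardown; those live in the vmalloc
 * area (region 5, RGN_GATE).  Identity-mapped pointers from regions
 * 6 and 7 are simply discarded.
 */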
void
iounmap (volatile void __iomem *addr)
{
	if (REGION_NUMBER(addr) == RGN_GATE)
		vunmap((void *) ((unsigned long) addr & PAGE_MASK));
}
EXPORT_SYMBOL(iounmap);