/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 * (C) Copyright 2001, 2002 Ralf Baechle
 */
#include <linux/module.h>
#include <asm/addrspace.h>
#include <asm/byteorder.h>

#include <linux/vmalloc.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <asm/tlbflush.h>

static inline void remap_area_pte(pte_t * pte, unsigned long address,
	phys_t size, phys_t phys_addr, unsigned long flags)
{
	phys_t end;
	unsigned long pfn;
	pgprot_t pgprot = __pgprot(_PAGE_GLOBAL | _PAGE_PRESENT | __READABLE
	                           | __WRITEABLE | flags);

	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	if (address >= end)
		BUG();
	pfn = phys_addr >> PAGE_SHIFT;
	do {
		if (!pte_none(*pte)) {
			printk("remap_area_pte: page already exists\n");
			BUG();
		}
		set_pte(pte, pfn_pte(pfn, pgprot));
		address += PAGE_SIZE;
		pfn++;
		pte++;
	} while (address && (address < end));
}

static inline int remap_area_pmd(pmd_t * pmd, unsigned long address,
	phys_t size, phys_t phys_addr, unsigned long flags)
{
	phys_t end;

	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	phys_addr -= address;
	if (address >= end)
		BUG();
	do {
		pte_t * pte = pte_alloc_kernel(&init_mm, pmd, address);
		if (!pte)
			return -ENOMEM;
		remap_area_pte(pte, address, end - address, address + phys_addr, flags);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));

	return 0;
}

static int remap_area_pages(unsigned long address, phys_t phys_addr,
	phys_t size, unsigned long flags)
{
	int error;
	pgd_t * dir;
	unsigned long end = address + size;

	phys_addr -= address;
	dir = pgd_offset(&init_mm, address);
	flush_cache_all();
	if (address >= end)
		BUG();
	spin_lock(&init_mm.page_table_lock);
	do {
		pmd_t *pmd;

		pmd = pmd_alloc(&init_mm, dir, address);
		error = -ENOMEM;
		if (!pmd)
			break;
		if (remap_area_pmd(pmd, address, end - address,
				   phys_addr + address, flags))
			break;
		error = 0;
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));
	spin_unlock(&init_mm.page_table_lock);
	flush_tlb_all();

	return error;
}
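
/*
 * Note on structure: remap_area_pages() walks the top-level page
 * directory, remap_area_pmd() fills one PGD entry's worth of PMDs, and
 * remap_area_pte() installs the final PTEs.  Each level clips its range
 * to what a single entry at the level above covers, so the three
 * do/while loops together tile an arbitrary [address, address + size)
 * span with mappings.
 */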

/*
 * Allow physical addresses to be fixed up to help 36 bit peripherals.
 */
phys_t __attribute__ ((weak))
fixup_bigphys_addr(phys_t phys_addr, phys_t size)
{
	return phys_addr;
}
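
/*
 * Illustrative sketch (not part of this file): because the default
 * above is declared weak, a platform whose peripherals sit above the
 * 32 bit boundary can override it from board code, e.g.:
 *
 *	phys_t fixup_bigphys_addr(phys_t phys_addr, phys_t size)
 *	{
 *		if (phys_addr >= BOARD_PCI_MEM_START)
 *			return phys_addr + BOARD_PCI_MEM_XLATE;
 *		return phys_addr;
 *	}
 *
 * BOARD_PCI_MEM_START and BOARD_PCI_MEM_XLATE are hypothetical
 * constants used only for illustration.
 */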

/*
 * Generic mapping function (not visible outside):
 */

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space.  Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */

#define IS_LOW512(addr) (!((phys_t)(addr) & (phys_t) ~0x1fffffffULL))
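
/*
 * ~0x1fffffff clears bits 0-28, so IS_LOW512 is true exactly when the
 * address fits in the low 512MB (2^29 bytes) reachable through KSEG1:
 * for example 0x10000000 passes, while 0x20000000 does not.
 */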

void * __ioremap(phys_t phys_addr, phys_t size, unsigned long flags)
{
	struct vm_struct * area;
	unsigned long offset;
	phys_t last_addr;
	void * addr;

	phys_addr = fixup_bigphys_addr(phys_addr, size);

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Map uncached objects in the low 512mb of address space using KSEG1,
	 * otherwise map using page tables.
	 */
	if (IS_LOW512(phys_addr) && IS_LOW512(last_addr) &&
	    flags == _CACHE_UNCACHED)
		return (void *) KSEG1ADDR(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (phys_addr < virt_to_phys(high_memory)) {
		char *t_addr, *t_end;
		struct page *page;

		t_addr = __va(phys_addr);
		t_end = t_addr + (size - 1);

		for (page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
			if (!PageReserved(page))
				return NULL;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	addr = area->addr;
	if (remap_area_pages((unsigned long) addr, phys_addr, size, flags)) {
		vunmap(addr);
		return NULL;
	}

	return (void *) (offset + (char *)addr);
}
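
/*
 * Usage sketch (illustrative only): drivers normally reach this through
 * the ioremap()/iounmap() wrappers in <asm/io.h>, along the lines of:
 *
 *	void __iomem *regs;
 *
 *	regs = ioremap(MYDEV_PHYS_BASE, MYDEV_REG_SIZE);
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + MYDEV_CTRL);
 *	iounmap(regs);
 *
 * MYDEV_PHYS_BASE, MYDEV_REG_SIZE and MYDEV_CTRL are hypothetical
 * device constants used only for illustration.
 */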

#define IS_KSEG1(addr) (((unsigned long)(addr) & ~0x1fffffffUL) == KSEG1)

void __iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p;

	/* KSEG1 mappings were never entered in the page tables */
	if (IS_KSEG1(addr))
		return;

	p = remove_vm_area((void *) (PAGE_MASK & (unsigned long __force) addr));
	if (!p)
		printk(KERN_ERR "iounmap: bad address %p\n", addr);

	kfree(p);
}

EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(__iounmap);