/*
 * arch/i386/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <asm/io.h>
#include <asm/fixmap.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>

#define ISA_START_ADDRESS	0xa0000
#define ISA_END_ADDRESS		0x100000

static int ioremap_pte_range(pmd_t *pmd, unsigned long addr,
		unsigned long end, unsigned long phys_addr, unsigned long flags)
{
	pte_t *pte;
	unsigned long pfn;

	pfn = phys_addr >> PAGE_SHIFT;
	pte = pte_alloc_kernel(&init_mm, pmd, addr);
	if (!pte)
		return -ENOMEM;
	/* Fill each PTE in [addr, end) with the next physical page frame. */
	do {
		BUG_ON(!pte_none(*pte));
		set_pte(pte, pfn_pte(pfn, __pgprot(_PAGE_PRESENT | _PAGE_RW |
					_PAGE_DIRTY | _PAGE_ACCESSED | flags)));
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;
}

static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end, unsigned long phys_addr, unsigned long flags)
{
	pmd_t *pmd;
	unsigned long next;

	phys_addr -= addr;
	pmd = pmd_alloc(&init_mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (ioremap_pte_range(pmd, addr, next, phys_addr + addr, flags))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
		unsigned long end, unsigned long phys_addr, unsigned long flags)
{
	pud_t *pud;
	unsigned long next;

	phys_addr -= addr;
	pud = pud_alloc(&init_mm, pgd, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (ioremap_pmd_range(pud, addr, next, phys_addr + addr, flags))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

static int ioremap_page_range(unsigned long addr,
		unsigned long end, unsigned long phys_addr, unsigned long flags)
{
	pgd_t *pgd;
	unsigned long next;
	int err;

	BUG_ON(addr >= end);
	flush_cache_all();
	phys_addr -= addr;
	pgd = pgd_offset_k(addr);
	spin_lock(&init_mm.page_table_lock);
	/* Walk the kernel page tables top-down: pgd -> pud -> pmd -> pte. */
	do {
		next = pgd_addr_end(addr, end);
		err = ioremap_pud_range(pgd, addr, next, phys_addr+addr, flags);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);
	spin_unlock(&init_mm.page_table_lock);
	flush_tlb_all();
	return err;
}

/*
 * Generic mapping function (not visible outside):
 */

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
{
	void __iomem * addr;
	struct vm_struct * area;
	unsigned long offset, last_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
		return (void __iomem *) phys_to_virt(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (phys_addr <= virt_to_phys(high_memory - 1)) {
		char *t_addr, *t_end;
		struct page *page;

		t_addr = __va(phys_addr);
		t_end = t_addr + (size - 1);

		for (page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
			if (!PageReserved(page))
				return NULL;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP | (flags << 20));
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	addr = (void __iomem *) area->addr;
	if (ioremap_page_range((unsigned long) addr,
			(unsigned long) addr + size, phys_addr, flags)) {
		vunmap((void __force *) addr);
		return NULL;
	}
	return (void __iomem *) (offset + (char __iomem *)addr);
}
EXPORT_SYMBOL(__ioremap);

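/*
 * A minimal usage sketch for __ioremap(); the bus address, size, register
 * offset, and variable names below are hypothetical. Passing 0 for flags
 * yields a default cacheable mapping; device registers normally want
 * ioremap_nocache() instead.
 *
 *	void __iomem *regs;
 *
 *	regs = __ioremap(0xfebf0000UL, 0x1000, 0);
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + 0x04);
 *	iounmap(regs);
 */
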
/**
 * ioremap_nocache     -   map bus memory into CPU space
 * @offset:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable:
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size)
{
	unsigned long last_addr;
	void __iomem *p = __ioremap(phys_addr, size, _PAGE_PCD);
	if (!p)
		return p;

	/* Guaranteed to be > phys_addr, as per __ioremap() */
	last_addr = phys_addr + size - 1;

	if (last_addr < virt_to_phys(high_memory) - 1) {
		struct page *ppage = virt_to_page(__va(phys_addr));
		unsigned long npages;

		phys_addr &= PAGE_MASK;

		/* This might overflow and become zero.. */
		last_addr = PAGE_ALIGN(last_addr);

		/* .. but that's ok, because modulo-2**n arithmetic will make
		 * the page-aligned "last - first" come out right.
		 */
		npages = (last_addr - phys_addr) >> PAGE_SHIFT;

		if (change_page_attr(ppage, npages, PAGE_KERNEL_NOCACHE) < 0) {
			iounmap(p);
			p = NULL;
		}
		global_flush_tlb();
	}

	return p;
}
EXPORT_SYMBOL(ioremap_nocache);

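/*
 * Usage sketch for ioremap_nocache(): map a device's registers uncached,
 * access them with the mmio helpers, then unmap with iounmap(). The BAR
 * address, length, and register offset here are hypothetical.
 *
 *	void __iomem *mmio;
 *	u32 status;
 *
 *	mmio = ioremap_nocache(0xfebf0000UL, 0x1000);
 *	if (!mmio)
 *		return -ENOMEM;
 *	status = readl(mmio + 0x10);
 *	writel(status | 0x1, mmio + 0x10);
 *	iounmap(mmio);
 */
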
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space. So handle that here.
	 */
	if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
			addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	write_lock(&vmlist_lock);
	p = __remove_vm_area((void *)(PAGE_MASK & (unsigned long __force)addr));
	if (!p) {
		printk(KERN_WARNING "iounmap: bad address %p\n", addr);
		goto out_unlock;
	}

	/* Reset the direct mapping if __ioremap stashed caching flags. */
	if ((p->flags >> 20) && p->phys_addr < virt_to_phys(high_memory) - 1) {
		change_page_attr(virt_to_page(__va(p->phys_addr)),
				 p->size >> PAGE_SHIFT,
				 PAGE_KERNEL);
		global_flush_tlb();
	}
out_unlock:
	write_unlock(&vmlist_lock);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);

void __init *bt_ioremap(unsigned long phys_addr, unsigned long size)
{
	unsigned long offset, last_addr;
	unsigned int nrpages;
	enum fixed_addresses idx;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
		return phys_to_virt(phys_addr);

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr) - phys_addr;

	/*
	 * Mappings have to fit in the FIX_BTMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (nrpages > NR_FIX_BTMAPS)
		return NULL;

	/*
	 * Ok, go for it..
	 */
	idx = FIX_BTMAP_BEGIN;
	while (nrpages > 0) {
		set_fixmap(idx, phys_addr);
		phys_addr += PAGE_SIZE;
		--idx;
		--nrpages;
	}
	return (void*) (offset + fix_to_virt(FIX_BTMAP_BEGIN));
}

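/*
 * Usage sketch for the boot-time pair bt_ioremap()/bt_iounmap(): early
 * __init code inspecting a firmware table before vmalloc (and hence the
 * normal ioremap machinery) is available. The physical address and length
 * here are hypothetical.
 *
 *	void *tbl;
 *
 *	tbl = bt_ioremap(0x000fd000UL, 0x400);
 *	if (tbl) {
 *		... parse the table through tbl ...
 *		bt_iounmap(tbl, 0x400);
 *	}
 */
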
void __init bt_iounmap(void *addr, unsigned long size)
{
	unsigned long virt_addr;
	unsigned long offset;
	unsigned int nrpages;
	enum fixed_addresses idx;

	virt_addr = (unsigned long)addr;
	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN))
		return;
	offset = virt_addr & ~PAGE_MASK;
	nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;

	idx = FIX_BTMAP_BEGIN;
	while (nrpages > 0) {
		clear_fixmap(idx);
		--idx;
		--nrpages;
	}
}