/*
 * arch/x86_64/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/slab.h>

#include <asm/io.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/proto.h>
#define ISA_START_ADDRESS	0xa0000
#define ISA_END_ADDRESS		0x100000
static inline void remap_area_pte(pte_t *pte, unsigned long address, unsigned long size,
	unsigned long phys_addr, unsigned long flags)
{
	unsigned long end;
	unsigned long pfn;

	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	if (address >= end)
		BUG();
	pfn = phys_addr >> PAGE_SHIFT;
	do {
		if (!pte_none(*pte)) {
			printk("remap_area_pte: page already exists\n");
			BUG();
		}
		/* Install one PTE: present, writable, global, plus caller flags. */
		set_pte(pte, pfn_pte(pfn, __pgprot(_PAGE_PRESENT | _PAGE_RW |
					_PAGE_GLOBAL | _PAGE_DIRTY | _PAGE_ACCESSED | flags)));
		address += PAGE_SIZE;
		pfn++;
		pte++;
	} while (address && (address < end));
}
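
/*
 * Worked example (editorial addition, not in the original file): with 4KB
 * pages, a call that reaches remap_area_pte() with phys_addr = 0xfebc1000
 * and flags = _PAGE_PCD computes pfn = 0xfebc1000 >> PAGE_SHIFT = 0xfebc1
 * and installs a PTE that is present, writable, global and cache-disabled.
 */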
static inline int remap_area_pmd(pmd_t *pmd, unsigned long address, unsigned long size,
	unsigned long phys_addr, unsigned long flags)
{
	unsigned long end;

	address &= ~PUD_MASK;
	end = address + size;
	if (end > PUD_SIZE)
		end = PUD_SIZE;
	phys_addr -= address;
	if (address >= end)
		BUG();
	do {
		pte_t *pte = pte_alloc_kernel(&init_mm, pmd, address);
		if (!pte)
			return -ENOMEM;
		remap_area_pte(pte, address, end - address, address + phys_addr, flags);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));
	return 0;
}
static inline int remap_area_pud(pud_t *pud, unsigned long address, unsigned long size,
	unsigned long phys_addr, unsigned long flags)
{
	unsigned long end;

	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	phys_addr -= address;
	if (address >= end)
		BUG();
	do {
		pmd_t *pmd = pmd_alloc(&init_mm, pud, address);
		if (!pmd)
			return -ENOMEM;
		remap_area_pmd(pmd, address, end - address, address + phys_addr, flags);
		address = (address + PUD_SIZE) & PUD_MASK;
		pud++;
	} while (address && (address < end));
	return 0;
}
static int remap_area_pages(unsigned long address, unsigned long phys_addr,
			    unsigned long size, unsigned long flags)
{
	int error;
	pgd_t *pgd;
	unsigned long end = address + size;

	phys_addr -= address;
	pgd = pgd_offset_k(address);
	flush_cache_all();
	if (address >= end)
		BUG();
	spin_lock(&init_mm.page_table_lock);
	do {
		pud_t *pud;
		pud = pud_alloc(&init_mm, pgd, address);
		error = -ENOMEM;
		if (!pud)
			break;
		if (remap_area_pud(pud, address, end - address,
				   phys_addr + address, flags))
			break;
		error = 0;
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		pgd++;
	} while (address && (address < end));
	spin_unlock(&init_mm.page_table_lock);
	flush_tlb_all();
	return error;
}
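
/*
 * Illustration (editorial addition, not in the original file): the helpers
 * above mirror the x86_64 4-level page-table layout.  With 4KB pages a
 * virtual address splits into bits 47-39 (PGD index), 38-30 (PUD index),
 * 29-21 (PMD index), 20-12 (PTE index) and 11-0 (page offset), so
 * remap_area_pages() advances in PGDIR_SIZE (512GB) strides,
 * remap_area_pud() in PUD_SIZE (1GB) strides, remap_area_pmd() in
 * PMD_SIZE (2MB) strides, and remap_area_pte() fills one 4KB page per PTE.
 */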
/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
static int
ioremap_change_attr(unsigned long phys_addr, unsigned long size,
		    unsigned long flags)
{
	int err = 0;

	if (flags && phys_addr + size - 1 < (end_pfn_map << PAGE_SHIFT)) {
		unsigned long npages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
		unsigned long vaddr = (unsigned long) __va(phys_addr);

		/*
		 * Must use an address here and not a struct page because the
		 * phys addr can be in a hole between nodes and not have a
		 * memmap entry.
		 */
		err = change_page_attr_addr(vaddr, npages, __pgprot(__PAGE_KERNEL|flags));
		if (!err)
			global_flush_tlb();
	}
	return err;
}
/*
 * Generic mapping function
 */

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *__ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
{
	void *addr;
	struct vm_struct *area;
	unsigned long offset, last_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
		return (__force void __iomem *)phys_to_virt(phys_addr);

#ifndef CONFIG_DISCONTIGMEM
	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (last_addr < virt_to_phys(high_memory)) {
		char *t_addr, *t_end;
		struct page *page;

		t_addr = __va(phys_addr);
		t_end = t_addr + (size - 1);

		for (page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
			if (!PageReserved(page))
				return NULL;
	}
#endif

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP | (flags << 20));	/* stash pgprot flags in vm_struct flags */
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	addr = area->addr;
	if (remap_area_pages((unsigned long) addr, phys_addr, size, flags)) {
		remove_vm_area((void *)(PAGE_MASK & (unsigned long) addr));
		return NULL;
	}
	if (ioremap_change_attr(phys_addr, size, flags) < 0) {
		area->flags &= 0xffffff;
		vunmap(addr);
		return NULL;
	}
	return (__force void __iomem *) (offset + (char *)addr);
}
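
/*
 * Worked example (editorial addition, not in the original file): because
 * only whole pages can be mapped, __ioremap(0xfebc1004, 0x10, 0) with 4KB
 * pages computes offset = 0x004, rounds phys_addr down to 0xfebc1000,
 * rounds size up to one page (0x1000), and returns the new virtual base
 * plus 0x004, so the caller never sees the alignment fixup.
 */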
/**
 * ioremap_nocache     -   map bus memory into CPU space
 * @offset:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size)
{
	return __ioremap(phys_addr, size, _PAGE_PCD);
}
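
/*
 * Usage sketch (editorial addition, not part of the original file): how a
 * driver would typically consume ioremap_nocache() for a memory-mapped
 * register window.  EXAMPLE_PHYS_BASE, EXAMPLE_WINDOW_LEN and
 * EXAMPLE_REG_STATUS are made-up names, not real hardware.
 */
#if 0	/* illustration only, not compiled */
#define EXAMPLE_PHYS_BASE	0xfebc0000UL	/* hypothetical MMIO base */
#define EXAMPLE_WINDOW_LEN	0x1000UL	/* one 4KB page */
#define EXAMPLE_REG_STATUS	0x04		/* hypothetical register offset */

static int example_read_status(u32 *status)
{
	void __iomem *regs;

	regs = ioremap_nocache(EXAMPLE_PHYS_BASE, EXAMPLE_WINDOW_LEN);
	if (!regs)
		return -ENOMEM;
	*status = readl(regs + EXAMPLE_REG_STATUS);	/* uncached MMIO read */
	iounmap(regs);					/* must pair with ioremap */
	return 0;
}
#endif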
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, **pprev;

	if (addr <= high_memory)
		return;
	if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	write_lock(&vmlist_lock);
	for (p = vmlist, pprev = &vmlist; p != NULL; pprev = &p->next, p = *pprev)
		if (p->addr == (void *)(PAGE_MASK & (unsigned long)addr))
			break;
	if (!p) {
		printk("iounmap: bad address %p\n", addr);
		goto out_unlock;
	}
	/* Unlink the area from vmlist and tear down its page tables. */
	*pprev = p->next;
	unmap_vm_area(p);
	if ((p->flags >> 20) &&
	    p->phys_addr + p->size - 1 < virt_to_phys(high_memory)) {
		/* p->size includes the guard page, but cpa doesn't like that */
		change_page_attr(virt_to_page(__va(p->phys_addr)),
				 p->size >> PAGE_SHIFT,
				 PAGE_KERNEL);
		global_flush_tlb();
	}
out_unlock:
	write_unlock(&vmlist_lock);
	kfree(p);
}