/*
 * arch/parisc/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 * (C) Copyright 2001 Helge Deller <deller@gmx.de>
 */
#include <linux/vmalloc.h>
#include <linux/errno.h>
#include <linux/module.h>

#include <asm/io.h>		/* for the __ioremap()/iounmap() prototypes */
#include <asm/pgalloc.h>

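/*
 * Page-table helpers used by __ioremap() when USE_HPPA_IOREMAP is set:
 * remap_area_pte() fills one PTE per page of the requested range with
 * _PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED plus the
 * caller-supplied flags, while remap_area_pmd() and remap_area_pages()
 * walk the pmd and pgd levels above it.
 */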
static inline void remap_area_pte(pte_t *pte, unsigned long address,
		unsigned long size, unsigned long phys_addr, unsigned long flags)
{
	unsigned long end;

	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	if (address >= end)
		BUG();
	do {
		if (!pte_none(*pte)) {
			printk(KERN_ERR "remap_area_pte: page already exists\n");
			BUG();
		}
		set_pte(pte, mk_pte_phys(phys_addr, __pgprot(_PAGE_PRESENT | _PAGE_RW |
					_PAGE_DIRTY | _PAGE_ACCESSED | flags)));
		address += PAGE_SIZE;
		phys_addr += PAGE_SIZE;
		pte++;
	} while (address && (address < end));
}

static inline int remap_area_pmd(pmd_t *pmd, unsigned long address,
		unsigned long size, unsigned long phys_addr, unsigned long flags)
{
	unsigned long end;

	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	phys_addr -= address;
	if (address >= end)
		BUG();
	do {
		pte_t *pte = pte_alloc_kernel(NULL, pmd, address);
		if (!pte)
			return -ENOMEM;
		remap_area_pte(pte, address, end - address, address + phys_addr, flags);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));
	return 0;
}

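/*
 * remap_area_pages() is the top-level walk: it takes init_mm's
 * page_table_lock, steps pgd by pgd over the target virtual range,
 * allocates the lower levels as needed and hands each chunk down to
 * remap_area_pmd().  It is only compiled when USE_HPPA_IOREMAP is set;
 * otherwise __ioremap() below returns a (possibly adjusted) physical
 * address directly and no page tables are touched.
 */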
#if (USE_HPPA_IOREMAP)
static int remap_area_pages(unsigned long address, unsigned long phys_addr,
		unsigned long size, unsigned long flags)
{
	int error;
	pgd_t *dir;
	unsigned long end = address + size;

	phys_addr -= address;
	dir = pgd_offset(&init_mm, address);
	flush_cache_all();
	if (address >= end)
		BUG();
	spin_lock(&init_mm.page_table_lock);
	do {
		pmd_t *pmd;

		error = -ENOMEM;
		pmd = pmd_alloc(dir, address);
		if (!pmd)
			break;
		if (remap_area_pmd(pmd, address, end - address,
				   phys_addr + address, flags))
			break;
		error = 0;
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));
	spin_unlock(&init_mm.page_table_lock);
	flush_tlb_all();
	return error;
}
#endif /* USE_HPPA_IOREMAP */

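/*
 * CONFIG_DEBUG_IOREMAP diagnostics: judging by their messages, the two
 * hooks below are called by the gsc_*() and __raw_*() accessors when they
 * are handed a bad address.  Reports are rate-limited to roughly one every
 * ten seconds via the 'last' timestamp.
 */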
#ifdef CONFIG_DEBUG_IOREMAP
static unsigned long last = 0;

void gsc_bad_addr(unsigned long addr)
{
	if (time_after(jiffies, last + HZ*10)) {
		printk("gsc_foo() called with bad address 0x%lx\n", addr);
		dump_stack();
		last = jiffies;
	}
}
EXPORT_SYMBOL(gsc_bad_addr);

void __raw_bad_addr(const volatile void __iomem *addr)
{
	if (time_after(jiffies, last + HZ*10)) {
		printk("__raw_foo() called with bad address 0x%p\n", addr);
		dump_stack();
		last = jiffies;
	}
}
EXPORT_SYMBOL(__raw_bad_addr);
#endif

/*
 * Generic mapping function (not visible outside):
 */

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
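/*
 * Usage sketch (illustrative only, not part of the original code; the
 * device address, length and flags value are made up):
 *
 *	void __iomem *regs = __ioremap(0xf0402c04UL, 0x20, 0);
 *	...
 *	iounmap(regs);
 *
 * 0xf0402c04 is not page-aligned, so on the USE_HPPA_IOREMAP path whole
 * pages are mapped and the sub-page offset (phys_addr & ~PAGE_MASK) is
 * folded back into the returned cookie, exactly as the NOTE above says.
 */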
void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
{
#if !(USE_HPPA_IOREMAP)
	unsigned long end = phys_addr + size - 1;

	/* Support EISA addresses */
	if ((phys_addr >= 0x00080000 && end < 0x000fffff)
	    || (phys_addr >= 0x00500000 && end < 0x03bfffff)) {
		phys_addr |= 0xfc000000;
	}

#ifdef CONFIG_DEBUG_IOREMAP
	return (void __iomem *)(phys_addr - (0x1UL << NYBBLE_SHIFT));
#else
	return (void __iomem *)phys_addr;
#endif

#else
	void *addr;
	struct vm_struct *area;
	unsigned long offset, last_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (phys_addr < virt_to_phys(high_memory)) {
		char *t_addr, *t_end;
		struct page *page;

		t_addr = __va(phys_addr);
		t_end = t_addr + (size - 1);

		for (page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
			if (!PageReserved(page))
				return NULL;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	addr = area->addr;
	if (remap_area_pages((unsigned long) addr, phys_addr, size, flags)) {
		vfree(addr);
		return NULL;
	}

	return (void __iomem *) (offset + (char *)addr);
#endif
}

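/*
 * iounmap() only has real work to do when USE_HPPA_IOREMAP built the
 * mapping via remap_area_pages(): in that case the vmalloc area is freed.
 * On the other path __ioremap() handed out the physical address itself,
 * so there is nothing to undo.
 */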
void iounmap(void __iomem *addr)
{
#if !(USE_HPPA_IOREMAP)
	return;
#else
	if (addr > high_memory)
		return vfree((void *) (PAGE_MASK & (unsigned long __force) addr));
#endif
}