/*
 * arch/parisc/mm/ioremap.c
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 * (C) Copyright 2001 Helge Deller <deller@gmx.de>
 * (C) Copyright 2005 Kyle McMartin <kyle@parisc-linux.org>
 */

#include <linux/vmalloc.h>
#include <linux/errno.h>
#include <linux/module.h>

#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
static inline void
remap_area_pte(pte_t *pte, unsigned long address, unsigned long size,
	       unsigned long phys_addr, unsigned long flags)
{
	unsigned long end, pfn;
	pgprot_t pgprot = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY |
				   _PAGE_ACCESSED | flags);

	/* Clamp the range to the PTE table covered by this PMD entry. */
	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;

	BUG_ON(address >= end);

	pfn = phys_addr >> PAGE_SHIFT;
	do {
		/* Never silently overwrite an existing mapping. */
		BUG_ON(!pte_none(*pte));

		set_pte(pte, pfn_pte(pfn, pgprot));

		address += PAGE_SIZE;
		pfn++;
		pte++;
	} while (address && (address < end));
}
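/*
 * Worked example (a sketch, assuming 4 KB pages): for a 12 KB range,
 * remap_area_pte() runs its loop three times, advancing the pfn and the
 * pte pointer once per page; the pte_none() check above turns a double
 * mapping of the same virtual page into a hard failure rather than
 * silent corruption.
 */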
static inline int
remap_area_pmd(pmd_t *pmd, unsigned long address, unsigned long size,
	       unsigned long phys_addr, unsigned long flags)
{
	unsigned long end;

	/* Clamp the range to the region covered by this PGD entry. */
	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;

	BUG_ON(address >= end);

	phys_addr -= address;
	do {
		pte_t *pte = pte_alloc_kernel(pmd, address);
		if (!pte)
			return -ENOMEM;

		remap_area_pte(pte, address, end - address,
			       address + phys_addr, flags);

		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));

	return 0;
}
static int
remap_area_pages(unsigned long address, unsigned long phys_addr,
		 unsigned long size, unsigned long flags)
{
	pgd_t *dir;
	int error = 0;
	unsigned long end = address + size;

	BUG_ON(address >= end);

	phys_addr -= address;
	dir = pgd_offset_k(address);

	flush_cache_all();

	do {
		pud_t *pud;
		pmd_t *pmd;

		error = -ENOMEM;
		pud = pud_alloc(&init_mm, dir, address);
		if (!pud)
			break;
		pmd = pmd_alloc(&init_mm, pud, address);
		if (!pmd)
			break;

		if (remap_area_pmd(pmd, address, end - address,
				   phys_addr + address, flags))
			break;

		error = 0;
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));

	flush_tlb_all();

	return error;
}
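/*
 * Worked example (a sketch, assuming 4 KB pages): remapping 16 KB at
 * physical 0xf0001000 gives a starting pfn of 0xf0001, and the walk above
 * fills four consecutive PTEs (pfns 0xf0001..0xf0004). The pgd/pmd cursors
 * only advance once per PGDIR_SIZE/PMD_SIZE chunk, so a range that fits
 * inside one PMD does the pgd -> pud -> pmd descent exactly once.
 */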
/*
 * Generic mapping function (not visible outside):
 */

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
			unsigned long flags)
{
	void *addr;
	struct vm_struct *area;
	unsigned long offset, last_addr;

#ifdef CONFIG_EISA
	unsigned long end = phys_addr + size - 1;
	/* Support EISA addresses */
	if ((phys_addr >= 0x00080000 && end < 0x000fffff) ||
	    (phys_addr >= 0x00500000 && end < 0x03bfffff)) {
		phys_addr |= F_EXTEND(0xfc000000);
	}
#endif

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (phys_addr < virt_to_phys(high_memory)) {
		char *t_addr, *t_end;
		struct page *page;

		t_addr = __va(phys_addr);
		t_end = t_addr + (size - 1);

		for (page = virt_to_page(t_addr);
		     page <= virt_to_page(t_end); page++) {
			if (!PageReserved(page))
				return NULL;
		}
	}

	/*
	 * Mappings have to be page-aligned. Round up to the byte *after*
	 * last_addr; rounding last_addr itself would drop the final page
	 * whenever the range ends exactly on a page boundary.
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;

	addr = area->addr;
	if (remap_area_pages((unsigned long)addr, phys_addr, size, flags)) {
		vfree(addr);
		return NULL;
	}

	return (void __iomem *)(offset + (char *)addr);
}
EXPORT_SYMBOL(__ioremap);
void iounmap(void __iomem *addr)
{
	/* Only vmalloc-space mappings (above high_memory) can be freed. */
	if (addr > high_memory)
		return vfree((void *)(PAGE_MASK & (unsigned long __force)addr));
}
EXPORT_SYMBOL(iounmap);
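/*
 * Example usage (a hedged sketch, not part of this file): drivers normally
 * reach __ioremap() through the ioremap*() wrappers in asm/io.h rather than
 * calling it directly. EXAMPLE_MMIO_BASE, EXAMPLE_MMIO_LEN, the register
 * offset, and the _PAGE_NO_CACHE flag choice below are illustrative
 * assumptions, not taken from this file.
 */
#if 0
static void __iomem *example_regs;

static int example_init(void)
{
	/*
	 * The base need not be page-aligned: __ioremap() folds the
	 * sub-page offset back into the cookie it returns.
	 */
	example_regs = __ioremap(EXAMPLE_MMIO_BASE, EXAMPLE_MMIO_LEN,
				 _PAGE_NO_CACHE);
	if (!example_regs)
		return -ENOMEM;

	/* Device registers must go through the MMIO accessors. */
	__raw_writel(0x1, example_regs + 0x10);
	return 0;
}

static void example_exit(void)
{
	iounmap(example_regs);
}
#endif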