/*
 * Copyright (C) 2010 Tobias Klauser <tklauser@distanz.ch>
 * Copyright (C) 2009 Wind River Systems Inc
 *   Implemented by fredrik.markstrom@gmail.com and ivarholmqvist@gmail.com
 * Copyright (C) 2004 Microtronix Datacom Ltd.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/export.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/io.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

/*
 * Fill one PTE-level table: map [address, address + size) within a single
 * PMD entry to the physical range starting at phys_addr.
 */
static inline void remap_area_pte(pte_t *pte, unsigned long address,
				unsigned long size, unsigned long phys_addr,
				unsigned long flags)
{
	unsigned long end;
	unsigned long pfn;
	pgprot_t pgprot = __pgprot(_PAGE_GLOBAL | _PAGE_PRESENT | _PAGE_READ
				| _PAGE_WRITE | flags);

	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	if (address >= end)
		BUG();
	pfn = PFN_DOWN(phys_addr);
	do {
		if (!pte_none(*pte)) {
			pr_err("remap_area_pte: page already exists\n");
			BUG();
		}
		set_pte(pte, pfn_pte(pfn, pgprot));
		address += PAGE_SIZE;
		pfn++;
		pte++;
	} while (address && (address < end));
}

/*
 * Walk one PMD-level table, allocating PTE tables as needed and handing
 * each PMD-sized chunk down to remap_area_pte().
 */
static inline int remap_area_pmd(pmd_t *pmd, unsigned long address,
				unsigned long size, unsigned long phys_addr,
				unsigned long flags)
{
	unsigned long end;

	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	phys_addr -= address;
	if (address >= end)
		BUG();
	do {
		pte_t *pte = pte_alloc_kernel(pmd, address);

		if (!pte)
			return -ENOMEM;
		remap_area_pte(pte, address, end - address, address + phys_addr,
			flags);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));
	return 0;
}

/*
 * Top of the walk: iterate over the PGD entries covering
 * [address, address + size), allocating intermediate tables on the way down.
 */
static int remap_area_pages(unsigned long address, unsigned long phys_addr,
			unsigned long size, unsigned long flags)
{
	int error;
	pgd_t *dir;
	unsigned long end = address + size;

	phys_addr -= address;
	dir = pgd_offset(&init_mm, address);
	flush_cache_all();
	if (address >= end)
		BUG();
	do {
		pud_t *pud;
		pmd_t *pmd;

		error = -ENOMEM;
		pud = pud_alloc(&init_mm, dir, address);
		if (!pud)
			break;
		pmd = pmd_alloc(&init_mm, pud, address);
		if (!pmd)
			break;
		if (remap_area_pmd(pmd, address, end - address,
				phys_addr + address, flags))
			break;
		error = 0;
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));
	flush_tlb_all();
	return error;
}

#define IS_MAPPABLE_UNCACHEABLE(addr) (addr < 0x20000000UL)
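
/*
 * Physical addresses below 0x20000000 need no page tables at all on
 * Nios II: they are reachable through a fixed uncached window starting
 * at CONFIG_NIOS2_IO_REGION_BASE. A worked example, assuming a base of
 * 0xe0000000 (the actual value comes from the kernel configuration):
 * a device register block at physical 0x10400000 is simply accessed at
 * virtual 0xe0000000 + 0x10400000 = 0xf0400000, uncached and without
 * consuming a TLB entry.
 */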

/*
 * Map some physical address range into the kernel address space.
 */
void __iomem *ioremap(unsigned long phys_addr, unsigned long size)
{
	struct vm_struct *area;
	unsigned long offset;
	unsigned long last_addr;
	void *addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/* Don't allow anybody to remap normal RAM that we're using */
	if (phys_addr > PHYS_OFFSET && phys_addr < virt_to_phys(high_memory)) {
		char *t_addr, *t_end;
		struct page *page;

		t_addr = __va(phys_addr);
		t_end = t_addr + (size - 1);
		for (page = virt_to_page(t_addr);
			page <= virt_to_page(t_end); page++)
			if (!PageReserved(page))
				return NULL;
	}

	/*
	 * Map uncached objects in the low part of the address space to
	 * CONFIG_NIOS2_IO_REGION_BASE
	 */
	if (IS_MAPPABLE_UNCACHEABLE(phys_addr) &&
	    IS_MAPPABLE_UNCACHEABLE(last_addr))
		return (void __iomem *)(CONFIG_NIOS2_IO_REGION_BASE +
					phys_addr);

	/* Mappings have to be page-aligned */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/* Ok, go for it */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	addr = area->addr;
	if (remap_area_pages((unsigned long) addr, phys_addr, size, 0)) {
		vunmap(addr);
		return NULL;
	}

	return (void __iomem *) (offset + (char *)addr);
}
EXPORT_SYMBOL(ioremap);

/*
 * iounmap() unmaps nearly everything, so be careful.
 * It no longer frees page tables; that ability was never used anyway
 * and might be added back later.
 */
void iounmap(void __iomem *addr)
{
	struct vm_struct *p;

	/* Addresses in the fixed uncached I/O region were never vmapped */
	if ((unsigned long) addr > CONFIG_NIOS2_IO_REGION_BASE)
		return;

	p = remove_vm_area((void *) (PAGE_MASK & (unsigned long __force) addr));
	if (!p)
		pr_err("iounmap: bad address %p\n", addr);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);
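
/*
 * A minimal usage sketch (not part of this file). The device address,
 * register offset, and function name are hypothetical; only the
 * ioremap()/readl()/writel()/iounmap() pattern is the standard one:
 *
 *	#include <linux/io.h>
 *
 *	#define EXAMPLE_DEV_PHYS	0x10400000UL	// hypothetical device
 *	#define EXAMPLE_CTRL_REG	0x04		// hypothetical register
 *
 *	static int example_init(void)
 *	{
 *		void __iomem *regs = ioremap(EXAMPLE_DEV_PHYS, PAGE_SIZE);
 *
 *		if (!regs)
 *			return -ENOMEM;
 *		writel(0x1, regs + EXAMPLE_CTRL_REG);	// hypothetical enable bit
 *		pr_info("example status: 0x%x\n", readl(regs));
 *		iounmap(regs);
 *		return 0;
 *	}
 *
 * Because EXAMPLE_DEV_PHYS lies below 0x20000000, this ioremap() takes
 * the uncached fast path above and the matching iounmap() is a no-op.
 */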