/*
 * Copyright (C) 2010 Tobias Klauser <tklauser@distanz.ch>
 * Copyright (C) 2009 Wind River Systems Inc
 *   Implemented by fredrik.markstrom@gmail.com and ivarholmqvist@gmail.com
 * Copyright (C) 2004 Microtronix Datacom Ltd.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/export.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/io.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
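
/*
 * Fill one PMD's worth of PTEs for [address, address + size), mapping
 * successive pages starting at phys_addr with the given protection flags.
 */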
static inline void remap_area_pte(pte_t *pte, unsigned long address,
				unsigned long size, unsigned long phys_addr,
				unsigned long flags)
{
	unsigned long end;
	unsigned long pfn;
	pgprot_t pgprot = __pgprot(_PAGE_GLOBAL | _PAGE_PRESENT | _PAGE_READ
				| _PAGE_WRITE | flags);

	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	if (address >= end)
		BUG();
	pfn = PFN_DOWN(phys_addr);
	do {
		if (!pte_none(*pte)) {
			pr_err("remap_area_pte: page already exists\n");
			BUG();
		}
		set_pte(pte, pfn_pte(pfn, pgprot));
		address += PAGE_SIZE;
		pfn++;
		pte++;
	} while (address && (address < end));
}
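
/*
 * Allocate PTE tables for the PMD entries covering the region and fill
 * them via remap_area_pte(); returns -ENOMEM if a PTE table cannot be
 * allocated, 0 on success.
 */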
static inline int remap_area_pmd(pmd_t *pmd, unsigned long address,
				unsigned long size, unsigned long phys_addr,
				unsigned long flags)
{
	unsigned long end;

	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	phys_addr -= address;
	if (address >= end)
		BUG();
	do {
		pte_t *pte = pte_alloc_kernel(pmd, address);

		if (!pte)
			return -ENOMEM;
		remap_area_pte(pte, address, end - address, address + phys_addr,
			flags);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));
	return 0;
}
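
/*
 * Top-level walk for ioremap: allocate the intermediate page-table
 * levels (p4d/pud/pmd) covering the region, hand each PMD range to
 * remap_area_pmd(), and flush the caches before and the TLB after
 * updating the tables.
 */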
static int remap_area_pages(unsigned long address, unsigned long phys_addr,
			unsigned long size, unsigned long flags)
{
	int error;
	pgd_t *dir;
	unsigned long end = address + size;

	phys_addr -= address;
	dir = pgd_offset(&init_mm, address);
	flush_cache_all();
	if (address >= end)
		BUG();
	do {
		p4d_t *p4d;
		pud_t *pud;
		pmd_t *pmd;

		error = -ENOMEM;
		p4d = p4d_alloc(&init_mm, dir, address);
		if (!p4d)
			break;
		pud = pud_alloc(&init_mm, p4d, address);
		if (!pud)
			break;
		pmd = pmd_alloc(&init_mm, pud, address);
		if (!pmd)
			break;
		if (remap_area_pmd(pmd, address, end - address,
			phys_addr + address, flags))
			break;
		error = 0;
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));
	flush_tlb_all();
	return error;
}
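
/*
 * Physical addresses in the low 512 MiB (below 0x20000000) are
 * reachable through the fixed uncached I/O region and need no
 * page-table mapping of their own.
 */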
#define IS_MAPPABLE_UNCACHEABLE(addr)	((addr) < 0x20000000UL)

/*
 * Map some physical address range into the kernel address space.
 */
void __iomem *ioremap(unsigned long phys_addr, unsigned long size)
{
	struct vm_struct *area;
	unsigned long offset;
	unsigned long last_addr;
	void *addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/* Don't allow anybody to remap normal RAM that we're using */
	if (phys_addr > PHYS_OFFSET && phys_addr < virt_to_phys(high_memory)) {
		char *t_addr, *t_end;
		struct page *page;

		t_addr = __va(phys_addr);
		t_end = t_addr + (size - 1);
		for (page = virt_to_page(t_addr);
		     page <= virt_to_page(t_end); page++)
			if (!PageReserved(page))
				return NULL;
	}

	/*
	 * Map uncached objects in the low part of address space to
	 * CONFIG_NIOS2_IO_REGION_BASE
	 */
	if (IS_MAPPABLE_UNCACHEABLE(phys_addr) &&
	    IS_MAPPABLE_UNCACHEABLE(last_addr))
		return (void __iomem *)(CONFIG_NIOS2_IO_REGION_BASE +
					phys_addr);

	/* Mappings have to be page-aligned */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	addr = area->addr;
	if (remap_area_pages((unsigned long) addr, phys_addr, size, 0)) {
		vunmap(addr);
		return NULL;
	}

	return (void __iomem *)(offset + (char *)addr);
}
EXPORT_SYMBOL(ioremap);
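
/*
 * Typical driver usage, as a sketch (REGS_PHYS and REGS_SIZE are
 * made-up values for illustration):
 *
 *	void __iomem *regs = ioremap(REGS_PHYS, REGS_SIZE);
 *
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + 0x04);
 *	...
 *	iounmap(regs);
 *
 * Accesses must go through the MMIO accessors (readl/writel and
 * friends), never through plain pointer dereferences.
 */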

/*
 * iounmap unmaps nearly everything, so be careful.
 * It no longer frees page tables; that was never used anyway
 * and might be added back later.
 */
void iounmap(void __iomem *addr)
{
	struct vm_struct *p;
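
	/* Addresses in the fixed uncached I/O region were never vmapped */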
	if ((unsigned long) addr > CONFIG_NIOS2_IO_REGION_BASE)
		return;

	p = remove_vm_area((void *) (PAGE_MASK & (unsigned long __force) addr));
	if (!p)
		pr_err("iounmap: bad address %p\n", addr);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);