/*
 * Copyright (C) 2010 Tobias Klauser <tklauser@distanz.ch>
 * Copyright (C) 2009 Wind River Systems Inc
 *   Implemented by fredrik.markstrom@gmail.com and ivarholmqvist@gmail.com
 * Copyright (C) 2004 Microtronix Datacom Ltd.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/export.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/io.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

static inline void remap_area_pte(pte_t *pte, unsigned long address,
				unsigned long size, unsigned long phys_addr,
				unsigned long flags)
{
	unsigned long end;
	unsigned long pfn;
	pgprot_t pgprot = __pgprot(_PAGE_GLOBAL | _PAGE_PRESENT | _PAGE_READ
				| _PAGE_WRITE | flags);

	/* Stay within the PTEs covered by a single PMD entry. */
	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	if (address >= end)
		BUG();
	pfn = PFN_DOWN(phys_addr);
	do {
		if (!pte_none(*pte)) {
			pr_err("remap_area_pte: page already exists\n");
			BUG();
		}
		set_pte(pte, pfn_pte(pfn, pgprot));
		address += PAGE_SIZE;
		pfn++;
		pte++;
	} while (address && (address < end));
}
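
/*
 * A worked illustration of the fill loop above, assuming 4 KiB pages
 * (PAGE_SHIFT == 12): for size 0x3000 and phys_addr 0x08000000,
 * pfn starts at PFN_DOWN(0x08000000) == 0x8000 and three consecutive
 * PTEs are written with pfns 0x8000, 0x8001 and 0x8002.  address
 * advances by PAGE_SIZE per iteration and end is clamped to PMD_SIZE,
 * so the walk never runs past the PTEs of a single PMD entry.
 */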

static inline int remap_area_pmd(pmd_t *pmd, unsigned long address,
				unsigned long size, unsigned long phys_addr,
				unsigned long flags)
{
	unsigned long end;

	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	phys_addr -= address;
	if (address >= end)
		BUG();
	do {
		pte_t *pte = pte_alloc_kernel(pmd, address);

		if (!pte)
			return -ENOMEM;
		remap_area_pte(pte, address, end - address, address + phys_addr,
			flags);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));
	return 0;
}

static int remap_area_pages(unsigned long address, unsigned long phys_addr,
			unsigned long size, unsigned long flags)
{
	int error;
	pgd_t *dir;
	unsigned long end = address + size;

	phys_addr -= address;
	dir = pgd_offset(&init_mm, address);
	flush_cache_all();
	if (address >= end)
		BUG();
	do {
		pud_t *pud;
		pmd_t *pmd;

		/* Allocate the intermediate levels, then fill the PTEs. */
		error = -ENOMEM;
		pud = pud_alloc(&init_mm, dir, address);
		if (!pud)
			break;
		pmd = pmd_alloc(&init_mm, pud, address);
		if (!pmd)
			break;
		if (remap_area_pmd(pmd, address, end - address,
			phys_addr + address, flags))
			break;
		error = 0;
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));
	flush_tlb_all();
	return error;
}

#define IS_MAPPABLE_UNCACHEABLE(addr) (addr < 0x20000000UL)
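
/*
 * Illustration of the uncached fast path taken in __ioremap() below.
 * CONFIG_NIOS2_IO_REGION_BASE is board configuration; 0xe0000000 is
 * assumed here only for the example.  A request for phys_addr
 * 0x10000000 with _PAGE_CACHED clear passes IS_MAPPABLE_UNCACHEABLE()
 * for both its first and last byte, so __ioremap() returns
 * (void __iomem *)0xf0000000 without touching any page tables.
 */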

/*
 * Map some physical address range into the kernel address space.
 */
void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
			unsigned long cacheflag)
{
	struct vm_struct *area;
	unsigned long offset;
	unsigned long last_addr;
	void *addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;

	if (!size || last_addr < phys_addr)
		return NULL;

	/* Don't allow anybody to remap normal RAM that we're using */
	if (phys_addr > PHYS_OFFSET && phys_addr < virt_to_phys(high_memory)) {
		char *t_addr, *t_end;
		struct page *page;

		t_addr = __va(phys_addr);
		t_end = t_addr + (size - 1);
		for (page = virt_to_page(t_addr);
			page <= virt_to_page(t_end); page++)
			if (!PageReserved(page))
				return NULL;
	}

	/*
	 * Map uncached objects in the low part of address space to
	 * CONFIG_NIOS2_IO_REGION_BASE
	 */
	if (IS_MAPPABLE_UNCACHEABLE(phys_addr) &&
	    IS_MAPPABLE_UNCACHEABLE(last_addr) &&
	    !(cacheflag & _PAGE_CACHED))
		return (void __iomem *)(CONFIG_NIOS2_IO_REGION_BASE + phys_addr);

	/* Mappings have to be page-aligned */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/* Ok, go for it */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	addr = area->addr;
	if (remap_area_pages((unsigned long) addr, phys_addr, size,
		cacheflag)) {
		vunmap(addr);
		return NULL;
	}
	return (void __iomem *) (offset + (char *)addr);
}
EXPORT_SYMBOL(__ioremap);
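
/*
 * A minimal usage sketch (the device base 0x30000404 and its register
 * layout are hypothetical).  Because the address lies above the
 * uncached window, this exercises the get_vm_area()/remap_area_pages()
 * path:
 *
 *	void __iomem *regs = __ioremap(0x30000404, 0x100, 0);
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs);	// sub-page offset 0x404 is already folded in
 *	__iounmap(regs);
 *
 * Drivers normally reach this through the generic ioremap() wrappers
 * rather than calling __ioremap() directly.
 */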

/*
 * __iounmap unmaps nearly everything, so be careful.
 * It no longer frees the page tables behind the mapping, but that
 * was never used anyway and might be added later.
 */
void __iounmap(void __iomem *addr)
{
	struct vm_struct *p;

	if ((unsigned long) addr > CONFIG_NIOS2_IO_REGION_BASE)
		return;

	p = remove_vm_area((void *) (PAGE_MASK & (unsigned long __force) addr));
	if (!p)
		pr_err("iounmap: bad address %p\n", addr);

	kfree(p);
}
EXPORT_SYMBOL(__iounmap);
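
/*
 * Example of the guard in __iounmap() above, with 0xe0000000 again
 * assumed as the region base for illustration: an uncached fast-path
 * cookie such as 0xf0000000 takes the early return, since no vm_struct
 * was ever created for it; only cookies below the region base, i.e.
 * ones handed out via get_vm_area(), reach remove_vm_area().
 */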