/*
 * This file contains the routines setting up the linux page tables.
 *
 * Copyright (C) 2008 Michal Simek
 * Copyright (C) 2008 PetaLogix
 *
 * Copyright (C) 2007 Xilinx, Inc.  All rights reserved.
 *
 * Derived from arch/ppc/mm/pgtable.c:
 *   -- paulus
 *
 * Derived from arch/ppc/mm/init.c:
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *   Copyright (C) 1996 Paul Mackerras
 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
 *
 * Derived from "arch/i386/mm/init.c"
 *   Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License.  See the file COPYING in the main directory of this
 * archive for more details.
 */
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/mm_types.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <linux/io.h>
#include <asm/mmu.h>
#include <asm/sections.h>
#include <asm/fixmap.h>
unsigned long ioremap_base;
unsigned long ioremap_bot;
EXPORT_SYMBOL(ioremap_bot);

#ifndef CONFIG_SMP
struct pgtable_cache_struct quicklists;
#endif
static void __iomem *__ioremap(phys_addr_t addr, unsigned long size,
                unsigned long flags)
{
        unsigned long v, i;
        phys_addr_t p;
        int err;

        /*
         * Choose an address to map it to.
         * Once the vmalloc system is running, we use it.
         * Before then, we use space going down from ioremap_base
         * (ioremap_bot records where we're up to).
         */
        p = addr & PAGE_MASK;
        size = PAGE_ALIGN(addr + size) - p;

        /*
         * Don't allow anybody to remap normal RAM that we're using.
         * mem_init() sets high_memory so only do the check after that.
         *
         * However, allow remap of rootfs: TBD
         */
        if (mem_init_done &&
                p >= memory_start && p < virt_to_phys(high_memory) &&
                !(p >= __virt_to_phys((phys_addr_t)__bss_stop) &&
                p < __virt_to_phys((phys_addr_t)__bss_stop))) {
                pr_warn("__ioremap(): phys addr "PTE_FMT" is RAM lr %pf\n",
                        (unsigned long)p, __builtin_return_address(0));
                return NULL;
        }

        if (size == 0)
                return NULL;

        /*
         * Is it already mapped? If the whole area is mapped then we're
         * done, otherwise remap it since we want to keep the virt addrs for
         * each request contiguous.
         *
         * We make the assumption here that if the bottom and top
         * of the range we want are mapped then it's mapped to the
         * same virt address (and this is contiguous).
         *      -- Cort
         */
        if (mem_init_done) {
                struct vm_struct *area;
                area = get_vm_area(size, VM_IOREMAP);
                if (area == NULL)
                        return NULL;
                v = (unsigned long) area->addr;
        } else {
                v = (ioremap_bot -= size);
        }

        if ((flags & _PAGE_PRESENT) == 0)
                flags |= _PAGE_KERNEL;
        if (flags & _PAGE_NO_CACHE)
                flags |= _PAGE_GUARDED;

        err = 0;
        for (i = 0; i < size && err == 0; i += PAGE_SIZE)
                err = map_page(v + i, p + i, flags);
        if (err) {
                if (mem_init_done)
                        vfree((void *)v);
                return NULL;
        }

        return (void __iomem *) (v + ((unsigned long)addr & ~PAGE_MASK));
}
void __iomem *ioremap(phys_addr_t addr, unsigned long size)
{
        return __ioremap(addr, size, _PAGE_NO_CACHE);
}
EXPORT_SYMBOL(ioremap);

void iounmap(volatile void __iomem *addr)
{
        if ((__force void *)addr > high_memory &&
                        (unsigned long) addr < ioremap_bot)
                vfree((void *) (PAGE_MASK & (unsigned long) addr));
}
EXPORT_SYMBOL(iounmap);
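/*
 * Example (illustrative): a typical driver-side use of the ioremap()/
 * iounmap() pair exported above.  The physical base and length are
 * made-up placeholders for some memory-mapped peripheral.
 *
 *      void __iomem *regs = ioremap(0x84000000, 0x1000);
 *      u32 id;
 *
 *      if (!regs)
 *              return -ENOMEM;
 *      id = readl(regs);       -- MMIO read through the uncached mapping
 *      iounmap(regs);
 *
 * Because __ioremap() is called with _PAGE_NO_CACHE (which also pulls in
 * _PAGE_GUARDED), the resulting mapping is uncached and guarded, as device
 * registers require.
 */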
int map_page(unsigned long va, phys_addr_t pa, int flags)
{
        pmd_t *pd;
        pte_t *pg;
        int err = -ENOMEM;

        /* Use upper 10 bits of VA to index the first level map */
        pd = pmd_offset(pgd_offset_k(va), va);
        /* Use middle 10 bits of VA to index the second-level map */
        pg = pte_alloc_kernel(pd, va); /* from powerpc - pgtable.c */
        /* pg = pte_alloc_kernel(&init_mm, pd, va); */

        if (pg != NULL) {
                err = 0;
                set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT,
                                __pgprot(flags)));
                if (unlikely(mem_init_done))
                        _tlbie(va);
        }
        return err;
}
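/*
 * The 10/10 split referred to above follows from 4 KiB pages and a
 * two-level table on a 32-bit virtual address: bits 31..22 select the
 * first-level entry, bits 21..12 select the PTE and bits 11..0 are the
 * offset within the page.  Worked example (illustrative, assuming
 * 4 KiB pages):
 *
 *      va                 = 0xC0001234
 *      first-level index  = va >> 22           = 0x300
 *      second-level index = (va >> 12) & 0x3ff = 0x001
 *      page offset        = va & 0xfff         = 0x234
 */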
/*
 * Map in all of physical memory starting at CONFIG_KERNEL_START.
 */
void __init mapin_ram(void)
{
        unsigned long v, p, s, f;

        v = CONFIG_KERNEL_START;
        p = memory_start;
        for (s = 0; s < lowmem_size; s += PAGE_SIZE) {
                f = _PAGE_PRESENT | _PAGE_ACCESSED |
                                _PAGE_SHARED | _PAGE_HWEXEC;
                if ((char *) v < _stext || (char *) v >= _etext)
                        f |= _PAGE_WRENABLE;
                else
                        /* On the MicroBlaze, no user access
                           forces R/W kernel access */
                        f |= _PAGE_USER;
                map_page(v, p, f);
                v += PAGE_SIZE;
                p += PAGE_SIZE;
        }
}
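/*
 * Rough sizing (illustrative, assuming 4 KiB pages): with 64 MiB of lowmem
 * the loop above issues 64 MiB / 4 KiB = 16384 map_page() calls, mapping
 * memory_start linearly at CONFIG_KERNEL_START.  Kernel text between
 * _stext and _etext ends up without _PAGE_WRENABLE (read-only to the
 * kernel); everything else is writable.
 */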
/* is x a power of 2? */
#define is_power_of_2(x)        ((x) != 0 && (((x) & ((x) - 1)) == 0))

/* Scan the real Linux page tables and return a PTE pointer for
 * a virtual address in a context.
 * Returns true (1) if PTE was found, zero otherwise. The pointer to
 * the PTE pointer is unmodified if PTE is not found.
 */
static int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep)
{
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte;
        int retval = 0;

        pgd = pgd_offset(mm, addr & PAGE_MASK);
        if (pgd) {
                pmd = pmd_offset(pgd, addr & PAGE_MASK);
                if (pmd_present(*pmd)) {
                        pte = pte_offset_kernel(pmd, addr & PAGE_MASK);
                        if (pte) {
                                retval = 1;
                                *ptep = pte;
                        }
                }
        }
        return retval;
}
/* Find physical address for this virtual address. Normally used by
 * I/O functions, but anyone can call it.
 */
unsigned long iopa(unsigned long addr)
{
        unsigned long pa;

        pte_t *pte;
        struct mm_struct *mm;

        /* Allow mapping of user addresses (within the thread)
         * for DMA if necessary.
         */
        if (addr < TASK_SIZE)
                mm = current->mm;
        else
                mm = &init_mm;

        pa = 0;
        if (get_pteptr(mm, addr, &pte))
                pa = (pte_val(*pte) & PAGE_MASK) | (addr & ~PAGE_MASK);

        return pa;
}
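/*
 * Example (illustrative): translating a kernel virtual buffer address into
 * a physical address before programming a device with it; "buf" is a
 * made-up placeholder.
 *
 *      unsigned long phys = iopa((unsigned long)buf);
 *
 * A return value of 0 means no PTE was found, so 0 effectively doubles as
 * "no translation" here.
 */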
__ref pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
                unsigned long address)
{
        pte_t *pte;

        if (mem_init_done) {
                pte = (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
        } else {
                pte = (pte_t *)early_get_page();
                if (pte)
                        clear_page(pte);
        }
        return pte;
}
void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t flags)
{
        unsigned long address = __fix_to_virt(idx);

        if (idx >= __end_of_fixed_addresses)
                BUG();
        map_page(address, phys, pgprot_val(flags));
}
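/*
 * Example (illustrative): establishing a fixmap entry.  FIX_EXAMPLE_SLOT is
 * a made-up placeholder for one of this architecture's fixed_addresses
 * values, and the physical address is arbitrary.
 *
 *      __set_fixmap(FIX_EXAMPLE_SLOT, 0x84000000, PAGE_KERNEL);
 *
 * The virtual address is fixed at compile time via __fix_to_virt(idx), so
 * only the backing physical page and the protection vary at run time.
 */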