arch/x86/mm/ioremap.c

/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>

#include "physaddr.h"

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			unsigned long prot_val)
{
	unsigned long nrpages = size >> PAGE_SHIFT;
	int err;

	switch (prot_val) {
	case _PAGE_CACHE_UC:
	default:
		err = _set_memory_uc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_WC:
		err = _set_memory_wc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_WB:
		err = _set_memory_wb(vaddr, nrpages);
		break;
	}

	return err;
}

static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
			       void *arg)
{
	unsigned long i;

	for (i = 0; i < nr_pages; ++i)
		if (pfn_valid(start_pfn + i) &&
		    !PageReserved(pfn_to_page(start_pfn + i)))
			return 1;

	WARN_ONCE(1, "ioremap on RAM pfn 0x%lx\n", start_pfn);

	return 0;
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap_caller(resource_size_t phys_addr,
		unsigned long size, unsigned long prot_val, void *caller)
{
	unsigned long offset, vaddr;
	resource_size_t pfn, last_pfn, last_addr;
	const resource_size_t unaligned_phys_addr = phys_addr;
	const unsigned long unaligned_size = size;
	struct vm_struct *area;
	unsigned long new_prot_val;
	pgprot_t prot;
	int retval;
	void __iomem *ret_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	if (!phys_addr_valid(phys_addr)) {
		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
		       (unsigned long long)phys_addr);
		WARN_ON_ONCE(1);
		return NULL;
	}

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (is_ISA_range(phys_addr, last_addr))
		return (__force void __iomem *)phys_to_virt(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	pfn      = phys_addr >> PAGE_SHIFT;
	last_pfn = last_addr >> PAGE_SHIFT;
	if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
				  __ioremap_check_ram) == 1)
		return NULL;

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PHYSICAL_PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
				 prot_val, &new_prot_val);
	if (retval) {
		printk(KERN_ERR "ioremap reserve_memtype failed %d\n", retval);
		return NULL;
	}

	if (prot_val != new_prot_val) {
		if (!is_new_memtype_allowed(phys_addr, size,
					    prot_val, new_prot_val)) {
			printk(KERN_ERR
		"ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n",
			       (unsigned long long)phys_addr,
			       (unsigned long long)(phys_addr + size),
			       prot_val, new_prot_val);
			goto err_free_memtype;
		}
		prot_val = new_prot_val;
	}

	switch (prot_val) {
	case _PAGE_CACHE_UC:
	default:
		prot = PAGE_KERNEL_IO_NOCACHE;
		break;
	case _PAGE_CACHE_UC_MINUS:
		prot = PAGE_KERNEL_IO_UC_MINUS;
		break;
	case _PAGE_CACHE_WC:
		prot = PAGE_KERNEL_IO_WC;
		break;
	case _PAGE_CACHE_WB:
		prot = PAGE_KERNEL_IO;
		break;
	}

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		goto err_free_memtype;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;

	if (kernel_map_sync_memtype(phys_addr, size, prot_val))
		goto err_free_area;

	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot))
		goto err_free_area;

	ret_addr = (void __iomem *) (vaddr + offset);
	mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

	/*
	 * Check if the request spans more than any BAR in the iomem resource
	 * tree.
	 */
	WARN_ONCE(iomem_map_sanity_check(unaligned_phys_addr, unaligned_size),
		  KERN_INFO "Info: mapping multiple BARs. Your kernel is fine.");

	return ret_addr;
err_free_area:
	free_vm_area(area);
err_free_memtype:
	free_memtype(phys_addr, phys_addr + size);
	return NULL;
}
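
/*
 * Worked example of __ioremap_caller()'s alignment fixup, assuming 4K
 * pages: a caller asking for 8 bytes at physical address 0xfed00004 gets
 * offset = 0x4, phys_addr rounded down to 0xfed00000 and size rounded up
 * to 0x1000, so exactly one page is mapped and the returned cookie is
 * vaddr + 0x4. (Illustrative note, not part of the upstream file.)
 */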

/**
 * ioremap_nocache - map bus memory into CPU space
 * @phys_addr:	bus address of the memory
 * @size:	size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable:
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
	/*
	 * Ideally, this should be:
	 *	pat_enabled ? _PAGE_CACHE_UC : _PAGE_CACHE_UC_MINUS;
	 *
	 * Till we fix all X drivers to use ioremap_wc(), we will use
	 * UC MINUS.
	 */
	unsigned long val = _PAGE_CACHE_UC_MINUS;

	return __ioremap_caller(phys_addr, size, val,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_nocache);
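
/*
 * Illustrative usage sketch (not part of the upstream file): a typical
 * driver maps a device aperture once, uses the mmio accessors on the
 * returned cookie and tears the mapping down again. "pdev" and the
 * register offsets below are hypothetical placeholders.
 *
 *	void __iomem *regs;
 *
 *	regs = ioremap_nocache(pci_resource_start(pdev, 0),
 *			       pci_resource_len(pdev, 0));
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + REG_CTRL);
 *	status = readl(regs + REG_STATUS);
 *	iounmap(regs);
 */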

/**
 * ioremap_wc - map memory into CPU space write combined
 * @phys_addr:	bus address of the memory
 * @size:	size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
{
	if (pat_enabled)
		return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
					__builtin_return_address(0));
	else
		return ioremap_nocache(phys_addr, size);
}
EXPORT_SYMBOL(ioremap_wc);
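
/*
 * Illustrative sketch (not part of the upstream file): write-combining is
 * typically requested for large, side-effect-free apertures such as
 * framebuffers, where batched posted writes are safe and much faster.
 * "fb_base" and "fb_size" are hypothetical placeholders.
 *
 *	void __iomem *fb = ioremap_wc(fb_base, fb_size);
 *
 *	if (!fb)
 *		return -ENOMEM;
 *	memset_io(fb, 0, fb_size);
 *	iounmap(fb);
 */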

void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WB,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);

void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
			   unsigned long prot_val)
{
	return __ioremap_caller(phys_addr, size, (prot_val & _PAGE_CACHE_MASK),
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_prot);

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space. So handle that here.
	 */
	if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    (void __force *)addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	mmiotrace_iounmap(addr);

	/*
	 * Use the vm area unlocked, assuming the caller ensures there isn't
	 * another iounmap for the same address in parallel. Reuse of the
	 * virtual address is prevented by leaving it in the global lists
	 * until we're done with it. cpa takes care of the direct mappings.
	 */
	p = find_vm_area((void __force *)addr);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

	/* Finally remove it */
	o = remove_vm_area((void __force *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
void *xlate_dev_mem_ptr(unsigned long phys)
{
	void *addr;
	unsigned long start = phys & PAGE_MASK;

	/* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
	if (page_is_ram(start >> PAGE_SHIFT))
		return __va(phys);

	addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
	if (addr)
		addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));

	return addr;
}

void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
	if (page_is_ram(phys >> PAGE_SHIFT))
		return;

	iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
}
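
/*
 * Illustrative sketch of how the /dev/mem read path pairs these helpers
 * (simplified from drivers/char/mem.c; variable names are indicative only,
 * not part of the upstream file):
 *
 *	ptr = xlate_dev_mem_ptr(p);
 *	if (!ptr)
 *		return -EFAULT;
 *	remaining = copy_to_user(buf, ptr, sz);
 *	unxlate_dev_mem_ptr(p, ptr);
 *	if (remaining)
 *		return -EFAULT;
 */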

static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
	/* Don't assume we're using swapper_pg_dir at this point */
	pgd_t *base = __va(read_cr3());
	pgd_t *pgd = &base[pgd_index(addr)];
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

bool __init is_early_ioremap_ptep(pte_t *ptep)
{
	return ptep >= &bm_pte[0] && ptep < &bm_pte[PAGE_SIZE/sizeof(pte_t)];
}

void __init early_ioremap_init(void)
{
	pmd_t *pmd;

#ifdef CONFIG_X86_64
	BUILD_BUG_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#else
	WARN_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#endif

	early_ioremap_setup();

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	memset(bm_pte, 0, sizeof(bm_pte));
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
#define __FIXADDR_TOP (-PAGE_SIZE)
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
#undef __FIXADDR_TOP
	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pmd %p != %p\n",
		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
		       fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
		       fix_to_virt(FIX_BTMAP_END));

		printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
		       FIX_BTMAP_BEGIN);
	}
}

void __init __early_set_fixmap(enum fixed_addresses idx,
			       phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);

	if (pgprot_val(flags))
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	else
		pte_clear(&init_mm, addr, pte);
	__flush_tlb_one(addr);
}