"[PATCH] Fix leaks on /proc/{*/sched,sched_debug,timer_list,timer_stats}" and
[mmotm.git] / arch / x86 / mm / iomap_32.c
blob84e236ce76ba9a8afd624cfc4c506ebaa654b926
/*
 * Copyright © 2008 Ingo Molnar
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 */
#include <asm/iomap.h>
#include <asm/pat.h>
#include <linux/module.h>
#include <linux/highmem.h>
static int is_io_mapping_possible(resource_size_t base, unsigned long size)
{
#if !defined(CONFIG_X86_PAE) && defined(CONFIG_PHYS_ADDR_T_64BIT)
	/* There is no way to map greater than 1 << 32 address without PAE */
	if (base + size > 0x100000000ULL)
		return 0;
#endif
	return 1;
}
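/*
 * Illustrative example with hypothetical numbers: without PAE a pte is
 * 32 bits wide, so a BAR at base 0xf0000000 with a 512 MiB (0x20000000)
 * aperture is rejected by the check above, since 0xf0000000 + 0x20000000
 * == 0x110000000 exceeds the 4 GiB boundary even though the base alone
 * fits in 32 bits.
 */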
int iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot)
{
	unsigned long flag = _PAGE_CACHE_WC;
	int ret;

	if (!is_io_mapping_possible(base, size))
		return -EINVAL;

	ret = io_reserve_memtype(base, base + size, &flag);
	if (ret)
		return ret;

	*prot = __pgprot(__PAGE_KERNEL | flag);
	return 0;
}
EXPORT_SYMBOL_GPL(iomap_create_wc);
void
iomap_free(resource_size_t base, unsigned long size)
{
	io_free_memtype(base, base + size);
}
EXPORT_SYMBOL_GPL(iomap_free);
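/*
 * Usage sketch with hypothetical base/size values: how a caller is
 * expected to pair iomap_create_wc() with iomap_free().  The in-tree
 * consumer is io_mapping_create_wc() in <linux/io-mapping.h>; the
 * function below exists only for illustration and is compiled out.
 */
#if 0
static int example_reserve_wc_range(void)
{
	resource_size_t base = 0xd0000000;	/* hypothetical MMIO base */
	unsigned long size = 1024 * 1024;	/* hypothetical aperture size */
	pgprot_t prot;
	int ret;

	ret = iomap_create_wc(base, size, &prot);
	if (ret)
		return ret;	/* range not mappable, or memtype conflict */

	/* ... map individual pages with iomap_atomic_prot_pfn(pfn, type, prot) ... */

	iomap_free(base, size);	/* release the memtype reservation */
	return 0;
}
#endif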
void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	pagefault_disable();

	debug_kmap_atomic(type);
	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
	arch_flush_lazy_mmu_mode();

	return (void *)vaddr;
}
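/*
 * Example of the slot arithmetic above: with KM_TYPE_NR slots per CPU,
 * a caller on CPU 2 passing KM_USER0 gets idx = KM_USER0 + KM_TYPE_NR * 2,
 * i.e. its own per-CPU fixmap slot.  Mappings on different CPUs therefore
 * never collide, provided each context uses the km_type reserved for it.
 */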
/*
 * Map 'pfn' using fixed map 'type' and protections 'prot'
 */
void *
iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
{
	/*
	 * For non-PAT systems, promote PAGE_KERNEL_WC to PAGE_KERNEL_UC_MINUS.
	 * PAGE_KERNEL_WC maps to PWT, which translates to uncached if the
	 * MTRR is UC or WC.  UC_MINUS gets the real intention of the user,
	 * which is "WC if the MTRR is WC, UC if you can't do that."
	 */
	if (!pat_enabled && pgprot_val(prot) == pgprot_val(PAGE_KERNEL_WC))
		prot = PAGE_KERNEL_UC_MINUS;

	return kmap_atomic_prot_pfn(pfn, type, prot);
}
EXPORT_SYMBOL_GPL(iomap_atomic_prot_pfn);
void
iounmap_atomic(void *kvaddr, enum km_type type)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();

	/*
	 * Force other mappings to Oops if they'll try to access this pte
	 * without first remapping it.  Keeping stale mappings around is a
	 * bad idea also, in case the page changes cacheability attributes
	 * or becomes a protected page in a hypervisor.
	 */
	if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx))
		kpte_clear_flush(kmap_pte-idx, vaddr);

	pagefault_enable();
}
EXPORT_SYMBOL_GPL(iounmap_atomic);
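/*
 * Usage sketch with a hypothetical pfn: iomap_atomic_prot_pfn() and
 * iounmap_atomic() must be paired with the same km_type, and the code
 * between them must not sleep, since pagefaults stay disabled for the
 * whole window.  Compiled out; for illustration only.
 */
#if 0
static void example_poke_bar_page(unsigned long pfn, pgprot_t prot, u32 val)
{
	u32 *vaddr;

	vaddr = iomap_atomic_prot_pfn(pfn, KM_USER0, prot);
	vaddr[0] = val;			/* single write-combined store */
	iounmap_atomic(vaddr, KM_USER0);	/* clear + flush the pte */
}
#endif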