/*
 *  linux/arch/arm/mm/mmap.c
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/shm.h>
#include <linux/sched.h>
#include <asm/system.h>
#define COLOUR_ALIGN(addr,pgoff)                \
        ((((addr)+SHMLBA-1)&~(SHMLBA-1)) +      \
         (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))
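
/*
 * Worked example (illustrative; assumes 4 KiB pages and ARM's SHMLBA of
 * 4 * PAGE_SIZE = 16 KiB): COLOUR_ALIGN(0x5000, 3) rounds 0x5000 up to the
 * next 16 KiB boundary, 0x8000, then adds the colour of page offset 3,
 * (3 << 12) & 0x3fff = 0x3000, giving 0xb000.  The result modulo SHMLBA
 * always equals (pgoff << PAGE_SHIFT) modulo SHMLBA, so every mapping of a
 * given page of a shared object lands on the same cache colour.
 */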

/*
 * We need to ensure that shared mappings are correctly aligned to
 * avoid aliasing issues with VIPT caches.  We need to ensure that
 * a specific page of an object is always mapped at a multiple of
 * SHMLBA bytes.
 *
 * We unconditionally provide this function for all cases, however
 * in the VIVT case, we optimise out the alignment rules.
 */
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long start_addr;
#ifdef CONFIG_CPU_V6
        unsigned int cache_type;
        int do_align = 0, aliasing = 0;

        /*
         * We only need to do colour alignment if either the I or D
         * caches alias.  This is indicated by bits 9 and 21 of the
         * cache type register.
         */
        cache_type = read_cpuid(CPUID_CACHETYPE);
        if (cache_type != read_cpuid(CPUID_ID)) {
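                /*
                 * Shifting the cache type register right by twelve bits
                 * lines the D-cache size field up with the I-cache size
                 * field, so the single mask below tests the aliasing bit
                 * of either cache in one step.
                 */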
                aliasing = (cache_type | cache_type >> 12) & (1 << 11);
                if (aliasing)
                        do_align = filp || flags & MAP_SHARED;
        }
#else
#define do_align 0
#define aliasing 0
#endif

        /*
         * We enforce the MAP_FIXED case.
         */
        if (flags & MAP_FIXED) {
                if (aliasing && flags & MAP_SHARED && addr & (SHMLBA - 1))
                        return -EINVAL;
                return addr;
        }

        if (len > TASK_SIZE)
                return -ENOMEM;

        if (addr) {
                if (do_align)
                        addr = COLOUR_ALIGN(addr, pgoff);
                else
                        addr = PAGE_ALIGN(addr);

                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
                    (!vma || addr + len <= vma->vm_start))
                        return addr;
        }
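
        /*
         * cached_hole_size remembers the largest hole seen below
         * free_area_cache on a previous pass, so a request larger than that
         * can resume the search at free_area_cache instead of re-scanning
         * from TASK_UNMAPPED_BASE.
         */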
        if (len > mm->cached_hole_size) {
                start_addr = addr = mm->free_area_cache;
        } else {
                start_addr = addr = TASK_UNMAPPED_BASE;
                mm->cached_hole_size = 0;
        }

full_search:
        if (do_align)
                addr = COLOUR_ALIGN(addr, pgoff);
        else
                addr = PAGE_ALIGN(addr);

        for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
                /* At this point:  (!vma || addr < vma->vm_end). */
                if (TASK_SIZE - len < addr) {
                        /*
                         * Start a new search - just in case we missed
                         * some holes.
                         */
                        if (start_addr != TASK_UNMAPPED_BASE) {
                                start_addr = addr = TASK_UNMAPPED_BASE;
                                mm->cached_hole_size = 0;
                                goto full_search;
                        }
                        return -ENOMEM;
                }
                if (!vma || addr + len <= vma->vm_start) {
                        /*
                         * Remember the place where we stopped the search:
                         */
                        mm->free_area_cache = addr + len;
                        return addr;
                }
                if (addr + mm->cached_hole_size < vma->vm_start)
                        mm->cached_hole_size = vma->vm_start - addr;
                addr = vma->vm_end;
                if (do_align)
                        addr = COLOUR_ALIGN(addr, pgoff);
        }
}

/*
 * You really shouldn't be using read() or write() on /dev/mem.  This
 * might go away in the future.
 */
int valid_phys_addr_range(unsigned long addr, size_t size)
{
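        /*
         * high_memory marks the top of the kernel's direct (lowmem) mapping,
         * so __pa(high_memory) is the first physical address we refuse to
         * reach through read()/write() on /dev/mem.
         */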
        if (addr + size > __pa(high_memory))
                return 0;

        return 1;
}

/*
 * We don't use supersection mappings for mmap() on /dev/mem, which
 * means that we can't map the memory area above the 4G barrier into
 * userspace.
 */
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
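        /*
         * With 4 KiB pages, 0x00100000 page frames is exactly 4 GiB, so this
         * rejects any mapping that would extend past the 4 GiB physical
         * boundary.
         */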
        return !(pfn + (size >> PAGE_SHIFT) > 0x00100000);
}