/*
 *  linux/arch/arm/mm/mmap.c
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/shm.h>
#include <linux/sched.h>
#include <linux/io.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <asm/cachetype.h>
#define COLOUR_ALIGN(addr,pgoff)                \
        ((((addr)+SHMLBA-1)&~(SHMLBA-1)) +      \
         (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))
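
/*
 * Worked example (illustrative, assuming 4 KiB pages and the usual ARM
 * SHMLBA of 4 * PAGE_SIZE = 16 KiB):
 *
 *      COLOUR_ALIGN(0x40001000, 1)
 *        = ((0x40001000 + 0x3fff) & ~0x3fff) + ((1 << 12) & 0x3fff)
 *        = 0x40004000 + 0x1000
 *        = 0x40005000
 *
 * The macro rounds addr up to an SHMLBA boundary and then adds the
 * page's colour offset within that boundary, so that
 * (result - (pgoff << PAGE_SHIFT)) is a multiple of SHMLBA and every
 * mapping of page pgoff lands on the same cache colour.
 */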

/* gap between mmap and stack */
#define MIN_GAP (128*1024*1024UL)
#define MAX_GAP ((TASK_SIZE)/6*5)

static int mmap_is_legacy(void)
{
        if (current->personality & ADDR_COMPAT_LAYOUT)
                return 1;

        if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
                return 1;

        return sysctl_legacy_va_layout;
}

static unsigned long mmap_base(unsigned long rnd)
{
        unsigned long gap = rlimit(RLIMIT_STACK);

        if (gap < MIN_GAP)
                gap = MIN_GAP;
        else if (gap > MAX_GAP)
                gap = MAX_GAP;

        return PAGE_ALIGN(TASK_SIZE - gap - rnd);
}
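
/*
 * Worked example (illustrative, assuming the common 3G/1G split where
 * TASK_SIZE is 0xbf000000): an 8 MiB RLIMIT_STACK is below MIN_GAP, so
 * the gap is clamped to 128 MiB and the base becomes
 * PAGE_ALIGN(0xbf000000 - 0x08000000 - rnd), i.e. a little under
 * 0xb7000000 for a small random offset. MAX_GAP in turn stops an
 * enormous stack limit from reserving more than 5/6 of the address
 * space for the stack.
 */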

/*
 * We need to ensure that shared mappings are correctly aligned to
 * avoid aliasing issues with VIPT caches.  We need to ensure that
 * a specific page of an object is always mapped at a multiple of
 * SHMLBA bytes.
 *
 * We unconditionally provide this function for all cases, however
 * in the VIVT case, we optimise out the alignment rules.
 */
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        int do_align = 0;
        int aliasing = cache_is_vipt_aliasing();
        struct vm_unmapped_area_info info;

        /*
         * We only need to do colour alignment if either the I or D
         * caches alias.
         */
        if (aliasing)
                do_align = filp || (flags & MAP_SHARED);

        /*
         * We enforce the MAP_FIXED case.
         */
        if (flags & MAP_FIXED) {
                if (aliasing && flags & MAP_SHARED &&
                    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
                        return -EINVAL;
                return addr;
        }

        if (len > TASK_SIZE)
                return -ENOMEM;

        if (addr) {
                if (do_align)
                        addr = COLOUR_ALIGN(addr, pgoff);
                else
                        addr = PAGE_ALIGN(addr);

                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
                    (!vma || addr + len <= vma->vm_start))
                        return addr;
        }

        info.flags = 0;
        info.length = len;
        info.low_limit = mm->mmap_base;
        info.high_limit = TASK_SIZE;
        info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
        info.align_offset = pgoff << PAGE_SHIFT;
        return vm_unmapped_area(&info);
}
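
/*
 * Note on the search parameters above (illustrative, assuming 4 KiB
 * pages and SHMLBA = 16 KiB): align_mask is PAGE_MASK & 0x3fff =
 * 0x3000, so vm_unmapped_area() only returns addresses whose colour
 * bits match those of align_offset (pgoff << PAGE_SHIFT). This is the
 * COLOUR_ALIGN invariant expressed as a mask/offset pair for the
 * generic search code.
 */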

unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
                        const unsigned long len, const unsigned long pgoff,
                        const unsigned long flags)
{
        struct vm_area_struct *vma;
        struct mm_struct *mm = current->mm;
        unsigned long addr = addr0;
        int do_align = 0;
        int aliasing = cache_is_vipt_aliasing();
        struct vm_unmapped_area_info info;

        /*
         * We only need to do colour alignment if either the I or D
         * caches alias.
         */
        if (aliasing)
                do_align = filp || (flags & MAP_SHARED);

        /* requested length too big for entire address space */
        if (len > TASK_SIZE)
                return -ENOMEM;

        if (flags & MAP_FIXED) {
                if (aliasing && flags & MAP_SHARED &&
                    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
                        return -EINVAL;
                return addr;
        }

        /* requesting a specific address */
        if (addr) {
                if (do_align)
                        addr = COLOUR_ALIGN(addr, pgoff);
                else
                        addr = PAGE_ALIGN(addr);
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
                                (!vma || addr + len <= vma->vm_start))
                        return addr;
        }

        info.flags = VM_UNMAPPED_AREA_TOPDOWN;
        info.length = len;
        info.low_limit = PAGE_SIZE;
        info.high_limit = mm->mmap_base;
        info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
        info.align_offset = pgoff << PAGE_SHIFT;
        addr = vm_unmapped_area(&info);

        /*
         * A failed mmap() very likely causes application failure,
         * so fall back to the bottom-up function here. This scenario
         * can happen with large stack limits and large mmap()
         * allocations.
         */
        if (addr & ~PAGE_MASK) {
                VM_BUG_ON(addr != -ENOMEM);
                info.flags = 0;
                info.low_limit = mm->mmap_base;
                info.high_limit = TASK_SIZE;
                addr = vm_unmapped_area(&info);
        }

        return addr;
}
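
/*
 * The "addr & ~PAGE_MASK" test above works because vm_unmapped_area()
 * returns either a page-aligned address or a negative errno, and no
 * small negative errno is page-aligned. Illustrative 32-bit
 * arithmetic: -ENOMEM is 0xfffffff4, and 0xfffffff4 & ~PAGE_MASK
 * (i.e. & 0xfff) is 0xff4, which is non-zero, so the bottom-up
 * fallback is taken.
 */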

void arch_pick_mmap_layout(struct mm_struct *mm)
{
        unsigned long random_factor = 0UL;

        /* 8 bits of randomness in 20 address space bits */
        if ((current->flags & PF_RANDOMIZE) &&
            !(current->personality & ADDR_NO_RANDOMIZE))
                random_factor = (get_random_int() % (1 << 8)) << PAGE_SHIFT;

        if (mmap_is_legacy()) {
                mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
                mm->get_unmapped_area = arch_get_unmapped_area;
                mm->unmap_area = arch_unmap_area;
        } else {
                mm->mmap_base = mmap_base(random_factor);
                mm->get_unmapped_area = arch_get_unmapped_area_topdown;
                mm->unmap_area = arch_unmap_area_topdown;
        }
}
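
/*
 * Randomisation range, worked through (assuming 4 KiB pages, so
 * PAGE_SHIFT == 12): get_random_int() % (1 << 8) yields 0..255, and
 * the shift turns that into a page-aligned offset of up to 255 pages,
 * i.e. at most 0xff000 bytes (just under 1 MiB) of entropy in the
 * mmap base.
 */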

/*
 * You really shouldn't be using read() or write() on /dev/mem.  This
 * might go away in the future.
 */
int valid_phys_addr_range(phys_addr_t addr, size_t size)
{
        if (addr < PHYS_OFFSET)
                return 0;
        if (addr + size > __pa(high_memory - 1) + 1)
                return 0;

        return 1;
}
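
/*
 * Example of the range check (hypothetical board, assuming RAM at
 * PHYS_OFFSET 0x80000000 with 512 MiB of lowmem): the valid window is
 * [0x80000000, 0xa0000000), so a 0x1000-byte read at 0x9ffff800 is
 * rejected because it would run 0x800 bytes past the end of lowmem.
 */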

/*
 * We don't use supersection mappings for mmap() on /dev/mem, which
 * means that we can't map the memory area above the 4G barrier into
 * userspace.
 */
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
        return !(pfn + (size >> PAGE_SHIFT) > 0x00100000);
}
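
/*
 * The 0x00100000 limit is 2^20 page frames; with 4 KiB pages that is
 * 2^20 * 2^12 = 2^32 bytes, i.e. exactly the 4 GiB barrier mentioned
 * above. A mapping is allowed only if it ends at or below that
 * boundary.
 */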

#ifdef CONFIG_STRICT_DEVMEM

#include <linux/ioport.h>

/*
 * devmem_is_allowed() checks to see if /dev/mem access to a certain
 * address is valid. The argument is a physical page number.
 * We mimic x86 here by disallowing access to system RAM as well as
 * device-exclusive MMIO regions. This effectively disables read()/write()
 * on /dev/mem.
 */
int devmem_is_allowed(unsigned long pfn)
{
        if (iomem_is_exclusive(pfn << PAGE_SHIFT))
                return 0;
        if (!page_is_ram(pfn))
                return 1;
        return 0;
}

#endif