arch/s390/mm/mmap.c

// SPDX-License-Identifier: GPL-2.0+
/*
 *  flexible mmap layout support
 *
 * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
 * All Rights Reserved.
 *
 * Started by Ingo Molnar <mingo@elte.hu>
 */

#include <linux/elf-randomize.h>
#include <linux/personality.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/random.h>
#include <linux/compat.h>
#include <linux/security.h>
#include <asm/pgalloc.h>
#include <asm/elf.h>
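
/*
 * Upper bound on how far the stack top may be shifted down by
 * randomization: zero when randomization is disabled for this task,
 * otherwise STACK_RND_MASK worth of pages.
 */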
static unsigned long stack_maxrandom_size(void)
{
	if (!(current->flags & PF_RANDOMIZE))
		return 0;
	return STACK_RND_MASK << PAGE_SHIFT;
}
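
/*
 * Use the legacy bottom-up layout if the ADDR_COMPAT_LAYOUT
 * personality bit is set, if the stack may grow without bound, or if
 * the legacy_va_layout sysctl is enabled.
 */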
static inline int mmap_is_legacy(struct rlimit *rlim_stack)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;
	if (rlim_stack->rlim_cur == RLIM_INFINITY)
		return 1;
	return sysctl_legacy_va_layout;
}
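
/*
 * Page-aligned random offset applied to the mmap base; the entropy is
 * bounded by MMAP_RND_MASK.
 */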
unsigned long arch_mmap_rnd(void)
{
	return (get_random_int() & MMAP_RND_MASK) << PAGE_SHIFT;
}
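
/* Bottom-up layout: the mmap area starts at TASK_UNMAPPED_BASE plus the random offset. */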
static unsigned long mmap_base_legacy(unsigned long rnd)
{
	return TASK_UNMAPPED_BASE + rnd;
}
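
/*
 * Top-down layout: place the mmap base below the stack, leaving room
 * for the stack rlimit plus its randomization and the guard gap, and
 * clamp that gap to [32 MB, 5/6 of STACK_TOP]. As a rough worked
 * example (assuming 4K pages and the default 1 MB stack_guard_gap):
 * an 8 MB RLIMIT_STACK with randomization disabled gives gap = 9 MB,
 * which the clamp raises to the 32 MB minimum, so the base ends up at
 * PAGE_ALIGN(STACK_TOP - 32 MB).
 */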
static inline unsigned long mmap_base(unsigned long rnd,
				      struct rlimit *rlim_stack)
{
	unsigned long gap = rlim_stack->rlim_cur;
	unsigned long pad = stack_maxrandom_size() + stack_guard_gap;
	unsigned long gap_min, gap_max;

	/* Values close to RLIM_INFINITY can overflow. */
	if (gap + pad > gap)
		gap += pad;

	/*
	 * Top of mmap area (just below the process stack).
	 * Leave at least a ~32 MB hole.
	 */
	gap_min = 32 * 1024 * 1024UL;
	gap_max = (STACK_TOP / 6) * 5;

	if (gap < gap_min)
		gap = gap_min;
	else if (gap > gap_max)
		gap = gap_max;

	return PAGE_ALIGN(STACK_TOP - gap - rnd);
}
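
/*
 * Bottom-up search for a free range between mm->mmap_base and
 * TASK_SIZE. File-backed and shared mappings get MMAP_ALIGN_MASK
 * alignment; if the chosen range ends above the current ASCE
 * (address-space-control-element) limit, the page tables are upgraded
 * via crst_table_upgrade() before the address is returned.
 */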
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct vm_unmapped_area_info info;
	int rc;

	if (len > TASK_SIZE - mmap_min_addr)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		goto check_asce_limit;

	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			goto check_asce_limit;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	if (filp || (flags & MAP_SHARED))
		info.align_mask = MMAP_ALIGN_MASK << PAGE_SHIFT;
	else
		info.align_mask = 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);
	if (addr & ~PAGE_MASK)
		return addr;

check_asce_limit:
	if (addr + len > current->mm->context.asce_limit &&
	    addr + len <= TASK_SIZE) {
		rc = crst_table_upgrade(mm, addr + len);
		if (rc)
			return (unsigned long) rc;
	}

	return addr;
}
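
/*
 * Top-down variant: search below mm->mmap_base first, with a
 * bottom-up fallback (see the comment inside the function).
 */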
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			       const unsigned long len, const unsigned long pgoff,
			       const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	struct vm_unmapped_area_info info;
	int rc;

	/* requested length too big for entire address space */
	if (len > TASK_SIZE - mmap_min_addr)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		goto check_asce_limit;

	/* requesting a specific address */
	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			goto check_asce_limit;
	}

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = max(PAGE_SIZE, mmap_min_addr);
	info.high_limit = mm->mmap_base;
	if (filp || (flags & MAP_SHARED))
		info.align_mask = MMAP_ALIGN_MASK << PAGE_SHIFT;
	else
		info.align_mask = 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
		if (addr & ~PAGE_MASK)
			return addr;
	}

check_asce_limit:
	if (addr + len > current->mm->context.asce_limit &&
	    addr + len <= TASK_SIZE) {
		rc = crst_table_upgrade(mm, addr + len);
		if (rc)
			return (unsigned long) rc;
	}

	return addr;
}

/*
 * This function, called very early during the creation of a new
 * process VM image, sets up which VM layout function to use:
 */
void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	unsigned long random_factor = 0UL;

	if (current->flags & PF_RANDOMIZE)
		random_factor = arch_mmap_rnd();

	/*
	 * Fall back to the standard layout if the personality
	 * bit is set, or if the expected stack growth is unlimited:
	 */
	if (mmap_is_legacy(rlim_stack)) {
		mm->mmap_base = mmap_base_legacy(random_factor);
		mm->get_unmapped_area = arch_get_unmapped_area;
	} else {
		mm->mmap_base = mmap_base(random_factor, rlim_stack);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
	}
}