/*
 *  flexible mmap layout support
 *
 * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 * Started by Ingo Molnar <mingo@elte.hu>
 */
#include <linux/personality.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/compat.h>
#include <asm/pgalloc.h>
static unsigned long stack_maxrandom_size(void)
{
	if (!(current->flags & PF_RANDOMIZE))
		return 0;
	if (current->personality & ADDR_NO_RANDOMIZE)
		return 0;
	return STACK_RND_MASK << PAGE_SHIFT;
}
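/*
 * stack_maxrandom_size() is the worst-case random offset that may be
 * inserted above the stack when address space randomization is enabled;
 * mmap_base() below subtracts it so the mmap area stays clear of even a
 * maximally randomized stack.
 */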
/*
 * Top of mmap area (just below the process stack).
 *
 * Leave at least a ~32 MB hole.
 */
#define MIN_GAP (32*1024*1024)
#define MAX_GAP (STACK_TOP/6*5)
static inline int mmap_is_legacy(void)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;
	if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
		return 1;
	return sysctl_legacy_va_layout;
}
static unsigned long mmap_rnd(void)
{
	if (!(current->flags & PF_RANDOMIZE))
		return 0;
	/* 8MB randomization for mmap_base */
	return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
}
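/*
 * With 4 KB pages (PAGE_SHIFT == 12) the 0x7ff page mask above allows
 * offsets of 0-2047 pages, i.e. just under 8 MB, which is the "8MB
 * randomization" referred to in the comment in mmap_rnd().
 */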
static inline unsigned long mmap_base(void)
{
	unsigned long gap = rlimit(RLIMIT_STACK);

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;

	return STACK_TOP - stack_maxrandom_size() - mmap_rnd() - gap;
}
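/*
 * Resulting top-down layout (sketch):
 *
 *	STACK_TOP
 *	  room for the stack: the stack rlimit clamped to
 *	  [MIN_GAP, MAX_GAP], plus the maximum stack and mmap
 *	  randomization
 *	mmap_base()
 *	  mmap area, growing downwards from here
 *
 * The legacy bottom-up layout keeps the mmap area at
 * TASK_UNMAPPED_BASE instead.
 */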
#ifndef CONFIG_64BIT

/*
 * This function, called very early during the creation of a new
 * process VM image, sets up which VM layout function to use:
 */
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	/*
	 * Fall back to the standard layout if the personality
	 * bit is set, or if the expected stack growth is unlimited:
	 */
	if (mmap_is_legacy()) {
		mm->mmap_base = TASK_UNMAPPED_BASE;
		mm->get_unmapped_area = arch_get_unmapped_area;
		mm->unmap_area = arch_unmap_area;
	} else {
		mm->mmap_base = mmap_base();
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
		mm->unmap_area = arch_unmap_area_topdown;
	}
}

#else
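/*
 * On 64 bit the generic helpers are wrapped below so that a mapping
 * which does not fit into the current address space limit can trigger
 * an upgrade of the page table to four levels instead of simply
 * failing.
 */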
int s390_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
{
	int rc;

	if (is_compat_task() || (TASK_SIZE >= (1UL << 53)))
		return 0;
	if (!(flags & MAP_FIXED))
		addr = 0;
	if ((addr + len) >= TASK_SIZE) {
		rc = crst_table_upgrade(current->mm, 1UL << 53);
		if (rc)
			return rc;
		update_mm(current->mm, current);
	}
	return 0;
}
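/*
 * For a 64 bit task, a mapping request that would reach beyond the
 * current TASK_SIZE (the exact range for MAP_FIXED, otherwise just the
 * length) causes s390_mmap_check() above to upgrade the page table to
 * four levels first, extending the addressable range to 2^53 bytes.
 */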
static unsigned long
s390_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	unsigned long area;
	int rc;

	area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
	if (!(area & ~PAGE_MASK))
		return area;
	if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < (1UL << 53)) {
		/* Upgrade the page table to 4 levels and retry. */
		rc = crst_table_upgrade(mm, 1UL << 53);
		if (rc)
			return (unsigned long) rc;
		update_mm(mm, current);
		area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
	}
	return area;
}
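/*
 * arch_get_unmapped_area() either returns a page aligned address or a
 * negative error value cast to unsigned long, so the check against
 * ~PAGE_MASK above distinguishes success from failure; only -ENOMEM in
 * a 3-level address space triggers the upgrade-and-retry path.
 */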
static unsigned long
s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr,
			  const unsigned long len, const unsigned long pgoff,
			  const unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	unsigned long area;
	int rc;

	area = arch_get_unmapped_area_topdown(filp, addr, len, pgoff, flags);
	if (!(area & ~PAGE_MASK))
		return area;
	if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < (1UL << 53)) {
		/* Upgrade the page table to 4 levels and retry. */
		rc = crst_table_upgrade(mm, 1UL << 53);
		if (rc)
			return (unsigned long) rc;
		update_mm(mm, current);
		area = arch_get_unmapped_area_topdown(filp, addr, len,
						      pgoff, flags);
	}
	return area;
}
/*
 * This function, called very early during the creation of a new
 * process VM image, sets up which VM layout function to use:
 */
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	/*
	 * Fall back to the standard layout if the personality
	 * bit is set, or if the expected stack growth is unlimited:
	 */
	if (mmap_is_legacy()) {
		mm->mmap_base = TASK_UNMAPPED_BASE;
		mm->get_unmapped_area = s390_get_unmapped_area;
		mm->unmap_area = arch_unmap_area;
	} else {
		mm->mmap_base = mmap_base();
		mm->get_unmapped_area = s390_get_unmapped_area_topdown;
		mm->unmap_area = arch_unmap_area_topdown;
	}
}

#endif