/*
 *  linux/arch/s390/mm/mmap.c
 *
 *  flexible mmap layout support
 *
 * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 * Started by Ingo Molnar <mingo@elte.hu>
 */
27 #include <linux/personality.h>
29 #include <linux/module.h>
30 #include <asm/pgalloc.h>
33 * Top of mmap area (just below the process stack).
35 * Leave an at least ~128 MB hole.
37 #define MIN_GAP (128*1024*1024)
38 #define MAX_GAP (TASK_SIZE/6*5)
40 static inline unsigned long mmap_base(void)
42 unsigned long gap
= current
->signal
->rlim
[RLIMIT_STACK
].rlim_cur
;
46 else if (gap
> MAX_GAP
)
49 return TASK_SIZE
- (gap
& PAGE_MASK
);
52 static inline int mmap_is_legacy(void)
56 * Force standard allocation for 64 bit programs.
58 if (!test_thread_flag(TIF_31BIT
))
61 return sysctl_legacy_va_layout
||
62 (current
->personality
& ADDR_COMPAT_LAYOUT
) ||
63 current
->signal
->rlim
[RLIMIT_STACK
].rlim_cur
== RLIM_INFINITY
;
69 * This function, called very early during the creation of a new
70 * process VM image, sets up which VM layout function to use:
72 void arch_pick_mmap_layout(struct mm_struct
*mm
)
75 * Fall back to the standard layout if the personality
76 * bit is set, or if the expected stack growth is unlimited:
78 if (mmap_is_legacy()) {
79 mm
->mmap_base
= TASK_UNMAPPED_BASE
;
80 mm
->get_unmapped_area
= arch_get_unmapped_area
;
81 mm
->unmap_area
= arch_unmap_area
;
83 mm
->mmap_base
= mmap_base();
84 mm
->get_unmapped_area
= arch_get_unmapped_area_topdown
;
85 mm
->unmap_area
= arch_unmap_area_topdown
;
88 EXPORT_SYMBOL_GPL(arch_pick_mmap_layout
);
#ifdef CONFIG_64BIT

/*
 * Bottom-up unmapped-area search for 64-bit tasks.  Wraps the generic
 * arch_get_unmapped_area() and, when the chosen mapping would end
 * above the currently addressable range (mm->context.asce_limit),
 * upgrades the page table to a larger region size first.
 *
 * Returns the chosen address, or a negative error code cast to
 * unsigned long (mmap convention) on failure.
 */
static unsigned long
s390_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	int rc;

	addr = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
	if (addr & ~PAGE_MASK)
		/* Not page aligned: an error code, pass it through. */
		return addr;
	if (unlikely(mm->context.asce_limit < addr + len)) {
		rc = crst_table_upgrade(mm, addr + len);
		if (rc)
			return (unsigned long) rc;
	}
	return addr;
}

#endif
#ifdef CONFIG_64BIT

/*
 * Top-down unmapped-area search for 64-bit tasks.  Wraps the generic
 * arch_get_unmapped_area_topdown() and, when the chosen mapping would
 * end above the currently addressable range (mm->context.asce_limit),
 * upgrades the page table to a larger region size first.
 *
 * Returns the chosen address, or a negative error code cast to
 * unsigned long (mmap convention) on failure.
 */
static unsigned long
s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
		const unsigned long len, const unsigned long pgoff,
		const unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	int rc;

	addr = arch_get_unmapped_area_topdown(filp, addr, len, pgoff, flags);
	if (addr & ~PAGE_MASK)
		/* Not page aligned: an error code, pass it through. */
		return addr;
	if (unlikely(mm->context.asce_limit < addr + len)) {
		rc = crst_table_upgrade(mm, addr + len);
		if (rc)
			return (unsigned long) rc;
	}
	return addr;
}

#endif
#ifdef CONFIG_64BIT

/*
 * This function, called very early during the creation of a new
 * process VM image, sets up which VM layout function to use:
 */
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	/*
	 * Fall back to the standard layout if the personality
	 * bit is set, or if the expected stack growth is unlimited:
	 */
	if (mmap_is_legacy()) {
		mm->mmap_base = TASK_UNMAPPED_BASE;
		mm->get_unmapped_area = s390_get_unmapped_area;
		mm->unmap_area = arch_unmap_area;
	} else {
		mm->mmap_base = mmap_base();
		mm->get_unmapped_area = s390_get_unmapped_area_topdown;
		mm->unmap_area = arch_unmap_area_topdown;
	}
}
EXPORT_SYMBOL_GPL(arch_pick_mmap_layout);

#endif