/*
 * address space "slices" (meta-segments) support
 *
 * Copyright (C) 2007 Benjamin Herrenschmidt, IBM Corporation.
 *
 * Based on hugetlb implementation
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <asm/mman.h>
#include <asm/mmu.h>
#include <asm/spu.h>
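/*
 * slice_convert_lock serialises rewrites of the per-context slice
 * page-size words (see slice_convert() and slice_set_user_psize()
 * below); lookups such as get_slice_psize() read them locklessly.
 */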
static DEFINE_SPINLOCK(slice_convert_lock);
#ifdef DEBUG
int _slice_debug = 1;

static void slice_print_mask(const char *label, struct slice_mask mask)
{
        char *p, buf[16 + 3 + 16 + 1];
        int i;

        if (!_slice_debug)
                return;
        p = buf;
        for (i = 0; i < SLICE_NUM_LOW; i++)
                *(p++) = (mask.low_slices & (1 << i)) ? '1' : '0';
        *(p++) = ' ';
        *(p++) = '-';
        *(p++) = ' ';
        for (i = 0; i < SLICE_NUM_HIGH; i++)
                *(p++) = (mask.high_slices & (1 << i)) ? '1' : '0';
        *(p++) = '\0';

        printk(KERN_DEBUG "%s:%s\n", label, buf);
}

#define slice_dbg(fmt...) do { if (_slice_debug) pr_debug(fmt); } while (0)
#else /* DEBUG */

static void slice_print_mask(const char *label, struct slice_mask mask) {}
#define slice_dbg(fmt...)

#endif /* DEBUG */
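/*
 * A note on the slice geometry (a sketch from the SLICE_* constants
 * used below, not authoritative: the real values live in the mmu
 * headers): the address space is split into small "low" slices below
 * SLICE_LOW_TOP and much larger "high" slices above it.  On the
 * classic 64-bit hash MMU layout this works out to 16 x 256MB low
 * slices covering the first 4GB, with 1TB high slices above.  A
 * struct slice_mask is simply a pair of bitmaps, one bit per low
 * slice and one bit per high slice.
 */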
static struct slice_mask slice_range_to_mask(unsigned long start,
                                             unsigned long len)
{
        unsigned long end = start + len - 1;
        struct slice_mask ret = { 0, 0 };

        if (start < SLICE_LOW_TOP) {
                unsigned long mend = min(end, SLICE_LOW_TOP);
                unsigned long mstart = min(start, SLICE_LOW_TOP);

                ret.low_slices = (1u << (GET_LOW_SLICE_INDEX(mend) + 1))
                        - (1u << GET_LOW_SLICE_INDEX(mstart));
        }

        if ((start + len) > SLICE_LOW_TOP)
                ret.high_slices = (1u << (GET_HIGH_SLICE_INDEX(end) + 1))
                        - (1u << GET_HIGH_SLICE_INDEX(start));

        return ret;
}
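/*
 * The (1u << (index(end) + 1)) - (1u << index(start)) expressions
 * above set every mask bit from the start index to the end index
 * inclusive.  For example, start index 2 and end index 4 gives
 * (1 << 5) - (1 << 2) = 32 - 4 = 28 = 0b11100, i.e. bits 2, 3 and 4.
 */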
static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
                              unsigned long len)
{
        struct vm_area_struct *vma;

        if ((mm->task_size - len) < addr)
                return 0;
        vma = find_vma(mm, addr);
        return (!vma || (addr + len) <= vma->vm_start);
}
static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
{
        return !slice_area_is_free(mm, slice << SLICE_LOW_SHIFT,
                                   1ul << SLICE_LOW_SHIFT);
}
static int slice_high_has_vma(struct mm_struct *mm, unsigned long slice)
{
        unsigned long start = slice << SLICE_HIGH_SHIFT;
        unsigned long end = start + (1ul << SLICE_HIGH_SHIFT);

        /* Hack, so that each address is controlled by exactly one
         * of the high or low area bitmaps, the first high area starts
         * at 4GB, not 0 */
        if (start == 0)
                start = SLICE_LOW_TOP;

        return !slice_area_is_free(mm, start, end - start);
}
static struct slice_mask slice_mask_for_free(struct mm_struct *mm)
{
        struct slice_mask ret = { 0, 0 };
        unsigned long i;

        for (i = 0; i < SLICE_NUM_LOW; i++)
                if (!slice_low_has_vma(mm, i))
                        ret.low_slices |= 1u << i;

        if (mm->task_size <= SLICE_LOW_TOP)
                return ret;

        for (i = 0; i < SLICE_NUM_HIGH; i++)
                if (!slice_high_has_vma(mm, i))
                        ret.high_slices |= 1u << i;

        return ret;
}
static struct slice_mask slice_mask_for_size(struct mm_struct *mm, int psize)
{
        struct slice_mask ret = { 0, 0 };
        unsigned long i;
        u64 psizes;

        psizes = mm->context.low_slices_psize;
        for (i = 0; i < SLICE_NUM_LOW; i++)
                if (((psizes >> (i * 4)) & 0xf) == psize)
                        ret.low_slices |= 1u << i;

        psizes = mm->context.high_slices_psize;
        for (i = 0; i < SLICE_NUM_HIGH; i++)
                if (((psizes >> (i * 4)) & 0xf) == psize)
                        ret.high_slices |= 1u << i;

        return ret;
}
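/*
 * Each slice's page-size is a 4-bit index packed into the u64
 * {low,high}_slices_psize words, hence the (psizes >> (i * 4)) & 0xf
 * extraction above: slice 0 lives in bits 0-3, slice 3 in bits 12-15,
 * and so on for all 16 slices.
 */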
static int slice_check_fit(struct slice_mask mask, struct slice_mask available)
{
        return (mask.low_slices & available.low_slices) == mask.low_slices &&
                (mask.high_slices & available.high_slices) == mask.high_slices;
}
static void slice_flush_segments(void *parm)
{
        struct mm_struct *mm = parm;
        unsigned long flags;

        if (mm != current->active_mm)
                return;

        /* update the paca copy of the context struct */
        get_paca()->context = current->active_mm->context;

        local_irq_save(flags);
        slb_flush_and_rebolt();
        local_irq_restore(flags);
}
static void slice_convert(struct mm_struct *mm, struct slice_mask mask, int psize)
{
        /* Write the new slice psize bits */
        u64 lpsizes, hpsizes;
        unsigned long i, flags;

        slice_dbg("slice_convert(mm=%p, psize=%d)\n", mm, psize);
        slice_print_mask(" mask", mask);

        /* We need to use a spinlock here to protect against
         * concurrent 64k -> 4k demotion ...
         */
        spin_lock_irqsave(&slice_convert_lock, flags);

        lpsizes = mm->context.low_slices_psize;
        for (i = 0; i < SLICE_NUM_LOW; i++)
                if (mask.low_slices & (1u << i))
                        lpsizes = (lpsizes & ~(0xful << (i * 4))) |
                                (((unsigned long)psize) << (i * 4));

        hpsizes = mm->context.high_slices_psize;
        for (i = 0; i < SLICE_NUM_HIGH; i++)
                if (mask.high_slices & (1u << i))
                        hpsizes = (hpsizes & ~(0xful << (i * 4))) |
                                (((unsigned long)psize) << (i * 4));

        mm->context.low_slices_psize = lpsizes;
        mm->context.high_slices_psize = hpsizes;

        slice_dbg(" lsps=%lx, hsps=%lx\n",
                  mm->context.low_slices_psize,
                  mm->context.high_slices_psize);

        spin_unlock_irqrestore(&slice_convert_lock, flags);

        /* XXX this is sub-optimal but will do for now */
        on_each_cpu(slice_flush_segments, mm, 0, 1);
#ifdef CONFIG_SPU_BASE
        spu_flush_all_slbs(mm);
#endif
}
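/*
 * Note that on_each_cpu() above runs slice_flush_segments() on every
 * online CPU and waits for completion; CPUs whose active_mm is not the
 * converted mm return early from the handler without touching their
 * SLB.
 */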
static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
                                              unsigned long len,
                                              struct slice_mask available,
                                              int psize, int use_cache)
{
        struct vm_area_struct *vma;
        unsigned long start_addr, addr;
        struct slice_mask mask;
        int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);

        if (use_cache) {
                if (len <= mm->cached_hole_size) {
                        start_addr = addr = TASK_UNMAPPED_BASE;
                        mm->cached_hole_size = 0;
                } else
                        start_addr = addr = mm->free_area_cache;
        } else
                start_addr = addr = TASK_UNMAPPED_BASE;

full_search:
        for (;;) {
                addr = _ALIGN_UP(addr, 1ul << pshift);
                if ((TASK_SIZE - len) < addr)
                        break;
                vma = find_vma(mm, addr);
                BUG_ON(vma && (addr >= vma->vm_end));

                mask = slice_range_to_mask(addr, len);
                if (!slice_check_fit(mask, available)) {
                        if (addr < SLICE_LOW_TOP)
                                addr = _ALIGN_UP(addr + 1, 1ul << SLICE_LOW_SHIFT);
                        else
                                addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
                        continue;
                }
                if (!vma || addr + len <= vma->vm_start) {
                        /*
                         * Remember the place where we stopped the search:
                         */
                        if (use_cache)
                                mm->free_area_cache = addr + len;
                        return addr;
                }
                if (use_cache && (addr + mm->cached_hole_size) < vma->vm_start)
                        mm->cached_hole_size = vma->vm_start - addr;
                addr = vma->vm_end;
        }

        /* Make sure we didn't miss any holes */
        if (use_cache && start_addr != TASK_UNMAPPED_BASE) {
                start_addr = addr = TASK_UNMAPPED_BASE;
                mm->cached_hole_size = 0;
                goto full_search;
        }
        return -ENOMEM;
}
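/*
 * Both search functions share the same slice-skipping trick: when a
 * candidate range doesn't fit the available mask, the address is
 * bumped straight to the next low or high slice boundary instead of
 * being rescanned page by page.  The top-down variant below walks from
 * mm->mmap_base downwards and falls back to the bottom-up search (with
 * caching disabled) if it runs out of room.
 */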
static unsigned long slice_find_area_topdown(struct mm_struct *mm,
                                             unsigned long len,
                                             struct slice_mask available,
                                             int psize, int use_cache)
{
        struct vm_area_struct *vma;
        unsigned long addr;
        struct slice_mask mask;
        int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);

        /* check if free_area_cache is useful for us */
        if (use_cache) {
                if (len <= mm->cached_hole_size) {
                        mm->cached_hole_size = 0;
                        mm->free_area_cache = mm->mmap_base;
                }

                /* either no address requested or can't fit in requested
                 * address hole
                 */
                addr = mm->free_area_cache;

                /* make sure it can fit in the remaining address space */
                if (addr > len) {
                        addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
                        mask = slice_range_to_mask(addr, len);
                        if (slice_check_fit(mask, available) &&
                            slice_area_is_free(mm, addr, len))
                                /* remember the address as a hint for
                                 * next time
                                 */
                                return (mm->free_area_cache = addr);
                }
        }

        addr = mm->mmap_base;
        while (addr > len) {
                /* Go down by chunk size */
                addr = _ALIGN_DOWN(addr - len, 1ul << pshift);

                /* Check for hit with different page size */
                mask = slice_range_to_mask(addr, len);
                if (!slice_check_fit(mask, available)) {
                        if (addr < SLICE_LOW_TOP)
                                addr = _ALIGN_DOWN(addr, 1ul << SLICE_LOW_SHIFT);
                        else if (addr < (1ul << SLICE_HIGH_SHIFT))
                                addr = SLICE_LOW_TOP;
                        else
                                addr = _ALIGN_DOWN(addr, 1ul << SLICE_HIGH_SHIFT);
                        continue;
                }

                /*
                 * Lookup failure means no vma is above this address,
                 * else if new region fits below vma->vm_start,
                 * return with success:
                 */
                vma = find_vma(mm, addr);
                if (!vma || (addr + len) <= vma->vm_start) {
                        /* remember the address as a hint for next time */
                        if (use_cache)
                                mm->free_area_cache = addr;
                        return addr;
                }

                /* remember the largest hole we saw so far */
                if (use_cache && (addr + mm->cached_hole_size) < vma->vm_start)
                        mm->cached_hole_size = vma->vm_start - addr;

                /* try just below the current vma->vm_start */
                addr = vma->vm_start;
        }

        /*
         * A failed mmap() very likely causes application failure,
         * so fall back to the bottom-up function here. This scenario
         * can happen with large stack limits and large mmap()
         * allocations.
         */
        addr = slice_find_area_bottomup(mm, len, available, psize, 0);

        /*
         * Restore the topdown base:
         */
        if (use_cache) {
                mm->free_area_cache = mm->mmap_base;
                mm->cached_hole_size = ~0UL;
        }

        return addr;
}
static unsigned long slice_find_area(struct mm_struct *mm, unsigned long len,
                                     struct slice_mask mask, int psize,
                                     int topdown, int use_cache)
{
        if (topdown)
                return slice_find_area_topdown(mm, len, mask, psize, use_cache);
        else
                return slice_find_area_bottomup(mm, len, mask, psize, use_cache);
}
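/*
 * The core allocator.  In outline: validate the hint (or the MAP_FIXED
 * address) against the "good" mask of slices already set to the
 * requested page size, then against the "potential" mask (good slices
 * plus any slice with no VMAs in it, which may be converted).  If the
 * hint doesn't work out, search the good mask, then the potential
 * mask, and finally convert whatever free slices the chosen range
 * touches.
 */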
unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
                                      unsigned long flags, unsigned int psize,
                                      int topdown, int use_cache)
{
        struct slice_mask mask;
        struct slice_mask good_mask;
        struct slice_mask potential_mask = {0,0} /* silence stupid warning */;
        int pmask_set = 0;
        int fixed = (flags & MAP_FIXED);
        int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
        struct mm_struct *mm = current->mm;

        /* Sanity checks */
        BUG_ON(mm->task_size == 0);

        slice_dbg("slice_get_unmapped_area(mm=%p, psize=%d...\n", mm, psize);
        slice_dbg(" addr=%lx, len=%lx, flags=%lx, topdown=%d, use_cache=%d\n",
                  addr, len, flags, topdown, use_cache);

        if (len > mm->task_size)
                return -ENOMEM;
        if (len & ((1ul << pshift) - 1))
                return -EINVAL;
        if (fixed && (addr & ((1ul << pshift) - 1)))
                return -EINVAL;
        if (fixed && addr > (mm->task_size - len))
                return -EINVAL;

        /* If hint, make sure it matches our alignment restrictions */
        if (!fixed && addr) {
                addr = _ALIGN_UP(addr, 1ul << pshift);
                slice_dbg(" aligned addr=%lx\n", addr);
        }

        /* First makeup a "good" mask of slices that have the right size
         * already
         */
        good_mask = slice_mask_for_size(mm, psize);
        slice_print_mask(" good_mask", good_mask);

        /* First check hint if it's valid or if we have MAP_FIXED */
        if ((addr != 0 || fixed) && (mm->task_size - len) >= addr) {

                /* Don't bother with hint if it overlaps a VMA */
                if (!fixed && !slice_area_is_free(mm, addr, len))
                        goto search;

                /* Build a mask for the requested range */
                mask = slice_range_to_mask(addr, len);
                slice_print_mask(" mask", mask);

                /* Check if we fit in the good mask. If we do, we just return,
                 * nothing else to do
                 */
                if (slice_check_fit(mask, good_mask)) {
                        slice_dbg(" fits good !\n");
                        return addr;
                }

                /* We don't fit in the good mask, check what other slices are
                 * empty and thus can be converted
                 */
                potential_mask = slice_mask_for_free(mm);
                potential_mask.low_slices |= good_mask.low_slices;
                potential_mask.high_slices |= good_mask.high_slices;
                pmask_set = 1;
                slice_print_mask(" potential", potential_mask);
                if (slice_check_fit(mask, potential_mask)) {
                        slice_dbg(" fits potential !\n");
                        goto convert;
                }
        }

        /* If we have MAP_FIXED and failed the above step, then error out */
        if (fixed)
                return -EBUSY;

 search:
        slice_dbg(" search...\n");

        /* Now let's see if we can find something in the existing slices
         * for that size
         */
        addr = slice_find_area(mm, len, good_mask, psize, topdown, use_cache);
        if (addr != -ENOMEM) {
                /* Found within the good mask, we don't have to setup,
                 * we thus return directly
                 */
                slice_dbg(" found area at 0x%lx\n", addr);
                return addr;
        }

        /* Won't fit, check what can be converted */
        if (!pmask_set) {
                potential_mask = slice_mask_for_free(mm);
                potential_mask.low_slices |= good_mask.low_slices;
                potential_mask.high_slices |= good_mask.high_slices;
                pmask_set = 1;
                slice_print_mask(" potential", potential_mask);
        }

        /* Now let's see if we can find something in the existing slices
         * for that size plus free slices
         */
        addr = slice_find_area(mm, len, potential_mask, psize, topdown,
                               use_cache);
        if (addr == -ENOMEM)
                return -ENOMEM;

        mask = slice_range_to_mask(addr, len);
        slice_dbg(" found potential area at 0x%lx\n", addr);
        slice_print_mask(" mask", mask);

 convert:
        slice_convert(mm, mask, psize);
        return addr;
}
EXPORT_SYMBOL_GPL(slice_get_unmapped_area);
unsigned long arch_get_unmapped_area(struct file *filp,
                                     unsigned long addr,
                                     unsigned long len,
                                     unsigned long pgoff,
                                     unsigned long flags)
{
        return slice_get_unmapped_area(addr, len, flags,
                                       current->mm->context.user_psize,
                                       0, 1);
}
unsigned long arch_get_unmapped_area_topdown(struct file *filp,
                                             const unsigned long addr0,
                                             const unsigned long len,
                                             const unsigned long pgoff,
                                             const unsigned long flags)
{
        return slice_get_unmapped_area(addr0, len, flags,
                                       current->mm->context.user_psize,
                                       1, 1);
}
unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr)
{
        u64 psizes;
        int index;

        if (addr < SLICE_LOW_TOP) {
                psizes = mm->context.low_slices_psize;
                index = GET_LOW_SLICE_INDEX(addr);
        } else {
                psizes = mm->context.high_slices_psize;
                index = GET_HIGH_SLICE_INDEX(addr);
        }

        return (psizes >> (index * 4)) & 0xf;
}
EXPORT_SYMBOL_GPL(get_slice_psize);
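/*
 * The value returned above is an MMU page-size index (one of the
 * MMU_PAGE_* constants, e.g. MMU_PAGE_4K or MMU_PAGE_64K) suitable
 * for indexing mmu_psize_defs[], as the slice_find_area_* helpers do.
 */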
/*
 * This is called by hash_page when it needs to do a lazy conversion of
 * an address space from real 64K pages to combo 4K pages (typically
 * when hitting a non-cacheable mapping on a processor or hypervisor
 * that won't allow them for 64K pages).
 *
 * This is also called in init_new_context() to change back the user
 * psize from whatever the parent context had it set to.
 * N.B. This may be called before mm->context.id has been set.
 *
 * This function will only change the content of the {low,high}_slices_psize
 * masks, it will not flush SLBs as this shall be handled lazily by the
 * caller.
 */
void slice_set_user_psize(struct mm_struct *mm, unsigned int psize)
{
        unsigned long flags, lpsizes, hpsizes;
        unsigned int old_psize;
        int i;

        slice_dbg("slice_set_user_psize(mm=%p, psize=%d)\n", mm, psize);

        spin_lock_irqsave(&slice_convert_lock, flags);

        old_psize = mm->context.user_psize;
        slice_dbg(" old_psize=%d\n", old_psize);
        if (old_psize == psize)
                goto bail;

        mm->context.user_psize = psize;
        wmb();

        lpsizes = mm->context.low_slices_psize;
        for (i = 0; i < SLICE_NUM_LOW; i++)
                if (((lpsizes >> (i * 4)) & 0xf) == old_psize)
                        lpsizes = (lpsizes & ~(0xful << (i * 4))) |
                                (((unsigned long)psize) << (i * 4));

        hpsizes = mm->context.high_slices_psize;
        for (i = 0; i < SLICE_NUM_HIGH; i++)
                if (((hpsizes >> (i * 4)) & 0xf) == old_psize)
                        hpsizes = (hpsizes & ~(0xful << (i * 4))) |
                                (((unsigned long)psize) << (i * 4));

        mm->context.low_slices_psize = lpsizes;
        mm->context.high_slices_psize = hpsizes;

        slice_dbg(" lsps=%lx, hsps=%lx\n",
                  mm->context.low_slices_psize,
                  mm->context.high_slices_psize);

 bail:
        spin_unlock_irqrestore(&slice_convert_lock, flags);
}
/*
 * is_hugepage_only_range() is used by generic code to verify whether
 * a normal mmap mapping (non hugetlbfs) is valid on a given area.
 *
 * Until the generic code provides a more generic hook and/or starts
 * calling arch get_unmapped_area for MAP_FIXED (which our implementation
 * here knows how to deal with), we hijack it to keep standard mappings
 * away from us.
 *
 * Because of that generic code limitation, MAP_FIXED mappings cannot
 * "convert" back a slice with no VMAs to the standard page size, only
 * get_unmapped_area() can. It would be possible to fix it here but I
 * prefer working on fixing the generic code instead.
 *
 * WARNING: This will not work if hugetlbfs isn't enabled since the
 * generic code will redefine that function as 0 in that case. This is ok
 * for now as we only use slices with hugetlbfs enabled. This should
 * be fixed as the generic code gets fixed.
 */
int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
                           unsigned long len)
{
        struct slice_mask mask, available;

        mask = slice_range_to_mask(addr, len);
        available = slice_mask_for_size(mm, mm->context.user_psize);

#if 0 /* too verbose */
        slice_dbg("is_hugepage_only_range(mm=%p, addr=%lx, len=%lx)\n",
                  mm, addr, len);
        slice_print_mask(" mask", mask);
        slice_print_mask(" available", available);
#endif
        return !slice_check_fit(mask, available);
}