/*
 * address space "slices" (meta-segments) support
 *
 * Copyright (C) 2007 Benjamin Herrenschmidt, IBM Corporation.
 *
 * Based on hugetlb implementation
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */
#undef DEBUG

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/hugetlb.h>
#include <asm/mman.h>
#include <asm/mmu.h>
#include <asm/copro.h>
#include <asm/hugetlb.h>
/* some sanity checks */
#if (PGTABLE_RANGE >> 43) > SLICE_MASK_SIZE
#error PGTABLE_RANGE exceeds slice_mask high_slices size
#endif

static DEFINE_SPINLOCK(slice_convert_lock);
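
/*
 * An mm's address space is carved into "slices": with the usual ppc64
 * geometry (an assumption here; the authoritative values are the
 * SLICE_* definitions), the range below 4GB is covered by 16 low
 * slices of 256MB each and the range above by 1TB-sized high slices.
 * Each slice independently tracks the page size backing it, and a
 * struct slice_mask holds one bit per low slice (low_slices) and one
 * bit per high slice (high_slices).
 */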
#ifdef DEBUG
int _slice_debug = 1;

static void slice_print_mask(const char *label, struct slice_mask mask)
{
	char	*p, buf[16 + 3 + 64 + 1];
	int	i;

	if (!_slice_debug)
		return;
	p = buf;
	for (i = 0; i < SLICE_NUM_LOW; i++)
		*(p++) = (mask.low_slices & (1 << i)) ? '1' : '0';
	*(p++) = ' ';
	*(p++) = '-';
	*(p++) = ' ';
	for (i = 0; i < SLICE_NUM_HIGH; i++)
		*(p++) = (mask.high_slices & (1ul << i)) ? '1' : '0';
	*(p++) = 0;

	printk(KERN_DEBUG "%s:%s\n", label, buf);
}

#define slice_dbg(fmt...) do { if (_slice_debug) pr_debug(fmt); } while (0)

#else

static void slice_print_mask(const char *label, struct slice_mask mask) {}
#define slice_dbg(fmt...)

#endif
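
/*
 * slice_range_to_mask() below builds the slice_mask covering
 * [start, start + len): the arithmetic
 * (1 << (index(end) + 1)) - (1 << index(start)) sets every bit from
 * index(start) to index(end) inclusive. Worked example (assuming 256MB
 * low slices): start = 0x20000000, len = 0x30000000 spans low slices
 * 2..4, i.e. (1 << 5) - (1 << 2) = 0b11100.
 */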
static struct slice_mask slice_range_to_mask(unsigned long start,
					     unsigned long len)
{
	unsigned long end = start + len - 1;
	struct slice_mask ret = { 0, 0 };

	if (start < SLICE_LOW_TOP) {
		unsigned long mend = min(end, SLICE_LOW_TOP);
		unsigned long mstart = min(start, SLICE_LOW_TOP);

		ret.low_slices = (1u << (GET_LOW_SLICE_INDEX(mend) + 1))
			- (1u << GET_LOW_SLICE_INDEX(mstart));
	}

	if ((start + len) > SLICE_LOW_TOP)
		ret.high_slices = (1ul << (GET_HIGH_SLICE_INDEX(end) + 1))
			- (1ul << GET_HIGH_SLICE_INDEX(start));

	return ret;
}
static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
			      unsigned long len)
{
	struct vm_area_struct *vma;

	if ((mm->task_size - len) < addr)
		return 0;
	vma = find_vma(mm, addr);
	return (!vma || (addr + len) <= vma->vm_start);
}
static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
{
	return !slice_area_is_free(mm, slice << SLICE_LOW_SHIFT,
				   1ul << SLICE_LOW_SHIFT);
}
static int slice_high_has_vma(struct mm_struct *mm, unsigned long slice)
{
	unsigned long start = slice << SLICE_HIGH_SHIFT;
	unsigned long end = start + (1ul << SLICE_HIGH_SHIFT);

	/* Hack, so that each address is controlled by exactly one
	 * of the high or low area bitmaps, the first high area starts
	 * at 4GB, not 0 */
	if (start == 0)
		start = SLICE_LOW_TOP;

	return !slice_area_is_free(mm, start, end - start);
}
static struct slice_mask slice_mask_for_free(struct mm_struct *mm)
{
	struct slice_mask ret = { 0, 0 };
	unsigned long i;

	for (i = 0; i < SLICE_NUM_LOW; i++)
		if (!slice_low_has_vma(mm, i))
			ret.low_slices |= 1u << i;

	if (mm->task_size <= SLICE_LOW_TOP)
		return ret;

	for (i = 0; i < SLICE_NUM_HIGH; i++)
		if (!slice_high_has_vma(mm, i))
			ret.high_slices |= 1ul << i;

	return ret;
}
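
/*
 * Page sizes are stored as 4-bit MMU_PAGE_* indices, packed two per
 * byte: low_slices_psize holds the 16 low-slice nibbles in a single
 * 64-bit word, while high_slices_psize is a byte array indexed by
 * (slice >> 1), with (slice & 1) selecting the nibble. For example,
 * the psize of high slice 1 (the 1TB range starting at 1TB) lives in
 * the upper nibble of high_slices_psize[0].
 */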
static struct slice_mask slice_mask_for_size(struct mm_struct *mm, int psize)
{
	unsigned char *hpsizes;
	int index, mask_index;
	struct slice_mask ret = { 0, 0 };
	unsigned long i;
	u64 lpsizes;

	lpsizes = mm->context.low_slices_psize;
	for (i = 0; i < SLICE_NUM_LOW; i++)
		if (((lpsizes >> (i * 4)) & 0xf) == psize)
			ret.low_slices |= 1u << i;

	hpsizes = mm->context.high_slices_psize;
	for (i = 0; i < SLICE_NUM_HIGH; i++) {
		mask_index = i & 0x1;
		index = i >> 1;
		if (((hpsizes[index] >> (mask_index * 4)) & 0xf) == psize)
			ret.high_slices |= 1ul << i;
	}

	return ret;
}
static int slice_check_fit(struct slice_mask mask, struct slice_mask available)
{
	return (mask.low_slices & available.low_slices) == mask.low_slices &&
		(mask.high_slices & available.high_slices) == mask.high_slices;
}
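
/*
 * slice_flush_segments() runs on each CPU via on_each_cpu(): once a
 * slice changes page size, SLB entries set up with the old size are
 * stale, so CPUs currently running this mm must flush and rebolt
 * their SLB before the new size can be used safely.
 */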
static void slice_flush_segments(void *parm)
{
	struct mm_struct *mm = parm;
	unsigned long flags;

	if (mm != current->active_mm)
		return;

	/* update the paca copy of the context struct */
	get_paca()->context = current->active_mm->context;

	local_irq_save(flags);
	slb_flush_and_rebolt();
	local_irq_restore(flags);
}
static void slice_convert(struct mm_struct *mm, struct slice_mask mask, int psize)
{
	int index, mask_index;
	/* Write the new slice psize bits */
	unsigned char *hpsizes;
	u64 lpsizes;
	unsigned long i, flags;

	slice_dbg("slice_convert(mm=%p, psize=%d)\n", mm, psize);
	slice_print_mask(" mask", mask);

	/* We need to use a spinlock here to protect against
	 * concurrent 64k -> 4k demotion ...
	 */
	spin_lock_irqsave(&slice_convert_lock, flags);

	lpsizes = mm->context.low_slices_psize;
	for (i = 0; i < SLICE_NUM_LOW; i++)
		if (mask.low_slices & (1u << i))
			lpsizes = (lpsizes & ~(0xful << (i * 4))) |
				(((unsigned long)psize) << (i * 4));

	/* Assign the value back */
	mm->context.low_slices_psize = lpsizes;

	hpsizes = mm->context.high_slices_psize;
	for (i = 0; i < SLICE_NUM_HIGH; i++) {
		mask_index = i & 0x1;
		index = i >> 1;
		if (mask.high_slices & (1ul << i))
			hpsizes[index] = (hpsizes[index] &
					  ~(0xf << (mask_index * 4))) |
				(((unsigned long)psize) << (mask_index * 4));
	}

	slice_dbg(" lsps=%lx, hsps=%lx\n",
		  mm->context.low_slices_psize,
		  mm->context.high_slices_psize);

	spin_unlock_irqrestore(&slice_convert_lock, flags);

	copro_flush_all_slbs(mm);
}
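
/*
 * Note: slice_convert() finishes with copro_flush_all_slbs(), which
 * invalidates SLB entries cached by coprocessors (e.g. Cell SPUs) on
 * behalf of this mm, so they also pick up the new slice page sizes.
 */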
/*
 * Compute which slice addr is part of;
 * set *boundary_addr to the start or end boundary of that slice
 * (depending on 'end' parameter);
 * return boolean indicating if the slice is marked as available in the
 * 'available' slice_mask.
 */
static bool slice_scan_available(unsigned long addr,
				 struct slice_mask available,
				 int end,
				 unsigned long *boundary_addr)
{
	unsigned long slice;
	if (addr < SLICE_LOW_TOP) {
		slice = GET_LOW_SLICE_INDEX(addr);
		*boundary_addr = (slice + end) << SLICE_LOW_SHIFT;
		return !!(available.low_slices & (1u << slice));
	} else {
		slice = GET_HIGH_SLICE_INDEX(addr);
		*boundary_addr = (slice + end) ?
			((slice + end) << SLICE_HIGH_SHIFT) : SLICE_LOW_TOP;
		return !!(available.high_slices & (1ul << slice));
	}
}
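
/*
 * The (slice + end) ? ... : SLICE_LOW_TOP special case above covers
 * high slice 0: its start boundary is not address 0 but SLICE_LOW_TOP
 * (4GB), matching the low/high split enforced in slice_high_has_vma().
 */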
static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
					      unsigned long len,
					      struct slice_mask available,
					      int psize)
{
	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
	unsigned long addr, found, next_end;
	struct vm_unmapped_area_info info;

	info.flags = 0;
	info.length = len;
	/* require alignment to the slice page size */
	info.align_mask = PAGE_MASK & ((1ul << pshift) - 1);
	info.align_offset = 0;

	addr = TASK_UNMAPPED_BASE;
	while (addr < TASK_SIZE) {
		info.low_limit = addr;
		if (!slice_scan_available(addr, available, 1, &addr))
			continue;

 next_slice:
		/*
		 * At this point [info.low_limit; addr) covers
		 * available slices only and ends at a slice boundary.
		 * Check if we need to reduce the range, or if we can
		 * extend it to cover the next available slice.
		 */
		if (addr >= TASK_SIZE)
			addr = TASK_SIZE;
		else if (slice_scan_available(addr, available, 1, &next_end)) {
			addr = next_end;
			goto next_slice;
		}
		info.high_limit = addr;

		found = vm_unmapped_area(&info);
		if (!(found & ~PAGE_MASK))
			return found;
	}

	return -ENOMEM;
}
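
/*
 * The loop above stitches together maximal runs of available slices
 * and hands each run to vm_unmapped_area(); the top-down variant
 * below mirrors it, walking down from mm->mmap_base instead.
 */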
static unsigned long slice_find_area_topdown(struct mm_struct *mm,
					     unsigned long len,
					     struct slice_mask available,
					     int psize)
{
	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
	unsigned long addr, found, prev;
	struct vm_unmapped_area_info info;

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.align_mask = PAGE_MASK & ((1ul << pshift) - 1);
	info.align_offset = 0;

	addr = mm->mmap_base;
	while (addr > PAGE_SIZE) {
		info.high_limit = addr;
		if (!slice_scan_available(addr - 1, available, 0, &addr))
			continue;

 prev_slice:
		/*
		 * At this point [addr; info.high_limit) covers
		 * available slices only and starts at a slice boundary.
		 * Check if we need to reduce the range, or if we can
		 * extend it to cover the previous available slice.
		 */
		if (addr < PAGE_SIZE)
			addr = PAGE_SIZE;
		else if (slice_scan_available(addr - 1, available, 0, &prev)) {
			addr = prev;
			goto prev_slice;
		}
		info.low_limit = addr;

		found = vm_unmapped_area(&info);
		if (!(found & ~PAGE_MASK))
			return found;
	}

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	return slice_find_area_bottomup(mm, len, available, psize);
}
static unsigned long slice_find_area(struct mm_struct *mm, unsigned long len,
				     struct slice_mask mask, int psize,
				     int topdown)
{
	if (topdown)
		return slice_find_area_topdown(mm, len, mask, psize);
	else
		return slice_find_area_bottomup(mm, len, mask, psize);
}
#define or_mask(dst, src)	do {			\
	(dst).low_slices |= (src).low_slices;		\
	(dst).high_slices |= (src).high_slices;		\
} while (0)

#define andnot_mask(dst, src)	do {			\
	(dst).low_slices &= ~(src).low_slices;		\
	(dst).high_slices &= ~(src).high_slices;	\
} while (0)

#ifdef CONFIG_PPC_64K_PAGES
#define MMU_PAGE_BASE	MMU_PAGE_64K
#else
#define MMU_PAGE_BASE	MMU_PAGE_4K
#endif
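
/*
 * MMU_PAGE_BASE is the kernel's base page size (64K with
 * CONFIG_PPC_64K_PAGES, 4K otherwise); slice_get_unmapped_area() only
 * performs the cross-CPU segment flush when converting slices to a
 * page size larger than this base.
 */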
unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
				      unsigned long flags, unsigned int psize,
				      int topdown)
{
	struct slice_mask mask = {0, 0};
	struct slice_mask good_mask;
	struct slice_mask potential_mask = {0, 0} /* silence stupid warning */;
	struct slice_mask compat_mask = {0, 0};
	int fixed = (flags & MAP_FIXED);
	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
	struct mm_struct *mm = current->mm;
	unsigned long newaddr;

	/* Sanity checks */
	BUG_ON(mm->task_size == 0);

	slice_dbg("slice_get_unmapped_area(mm=%p, psize=%d...\n", mm, psize);
	slice_dbg(" addr=%lx, len=%lx, flags=%lx, topdown=%d\n",
		  addr, len, flags, topdown);

	if (len > mm->task_size)
		return -ENOMEM;
	if (len & ((1ul << pshift) - 1))
		return -EINVAL;
	if (fixed && (addr & ((1ul << pshift) - 1)))
		return -EINVAL;
	if (fixed && addr > (mm->task_size - len))
		return -ENOMEM;

	/* If hint, make sure it matches our alignment restrictions */
	if (!fixed && addr) {
		addr = _ALIGN_UP(addr, 1ul << pshift);
		slice_dbg(" aligned addr=%lx\n", addr);
		/* Ignore hint if it's too large or overlaps a VMA */
		if (addr > mm->task_size - len ||
		    !slice_area_is_free(mm, addr, len))
			addr = 0;
	}
	/* First make up a "good" mask of slices that have the right size
	 * already
	 */
	good_mask = slice_mask_for_size(mm, psize);
	slice_print_mask(" good_mask", good_mask);

	/*
	 * Here "good" means slices that are already the right page size,
	 * "compat" means slices that have a compatible page size (i.e.
	 * 4k in a 64k pagesize kernel), and "free" means slices without
	 * any VMAs.
	 *
	 * If MAP_FIXED:
	 *	check if fits in good | compat => OK
	 *	check if fits in good | compat | free => convert free
	 * If have hint:
	 *	check if hint fits in good => OK
	 *	check if hint fits in good | free => convert free
	 * Otherwise:
	 *	search in good, found => OK
	 *	search in good | free, found => convert free
	 *	search in good | compat | free, found => convert free.
	 */
#ifdef CONFIG_PPC_64K_PAGES
	/* If we support combo pages, we can allow 64k pages in 4k slices */
	if (psize == MMU_PAGE_64K) {
		compat_mask = slice_mask_for_size(mm, MMU_PAGE_4K);
		if (fixed)
			or_mask(good_mask, compat_mask);
	}
#endif
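
	/*
	 * Only MAP_FIXED folds the compat slices into good_mask up
	 * front; hinted and searched allocations prefer true 64k
	 * slices first and only retry with compat slices further
	 * below, once the good and potential masks are exhausted.
	 */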
	/* First check hint if it's valid or if we have MAP_FIXED */
	if (addr != 0 || fixed) {
		/* Build a mask for the requested range */
		mask = slice_range_to_mask(addr, len);
		slice_print_mask(" mask", mask);

		/* Check if we fit in the good mask. If we do, we just return,
		 * nothing else to do
		 */
		if (slice_check_fit(mask, good_mask)) {
			slice_dbg(" fits good !\n");
			return addr;
		}
	} else {
		/* Now let's see if we can find something in the existing
		 * slices for that size
		 */
		newaddr = slice_find_area(mm, len, good_mask, psize, topdown);
		if (newaddr != -ENOMEM) {
			/* Found within the good mask, we don't have to setup,
			 * we thus return directly
			 */
			slice_dbg(" found area at 0x%lx\n", newaddr);
			return newaddr;
		}
	}
	/* We don't fit in the good mask, check what other slices are
	 * empty and thus can be converted
	 */
	potential_mask = slice_mask_for_free(mm);
	or_mask(potential_mask, good_mask);
	slice_print_mask(" potential", potential_mask);

	if ((addr != 0 || fixed) && slice_check_fit(mask, potential_mask)) {
		slice_dbg(" fits potential !\n");
		goto convert;
	}

	/* If we have MAP_FIXED and failed the above steps, then error out */
	if (fixed)
		return -EBUSY;
	slice_dbg(" search...\n");

	/* If we had a hint that didn't work out, see if we can fit
	 * anywhere in the good area.
	 */
	if (addr) {
		addr = slice_find_area(mm, len, good_mask, psize, topdown);
		if (addr != -ENOMEM) {
			slice_dbg(" found area at 0x%lx\n", addr);
			return addr;
		}
	}

	/* Now let's see if we can find something in the existing slices
	 * for that size plus free slices
	 */
	addr = slice_find_area(mm, len, potential_mask, psize, topdown);
#ifdef CONFIG_PPC_64K_PAGES
	if (addr == -ENOMEM && psize == MMU_PAGE_64K) {
		/* retry the search with 4k-page slices included */
		or_mask(potential_mask, compat_mask);
		addr = slice_find_area(mm, len, potential_mask, psize,
				       topdown);
	}
#endif

	if (addr == -ENOMEM)
		return -ENOMEM;

	mask = slice_range_to_mask(addr, len);
	slice_dbg(" found potential area at 0x%lx\n", addr);
	slice_print_mask(" mask", mask);
 convert:
	andnot_mask(mask, good_mask);
	andnot_mask(mask, compat_mask);
	if (mask.low_slices || mask.high_slices) {
		slice_convert(mm, mask, psize);
		if (psize > MMU_PAGE_BASE)
			on_each_cpu(slice_flush_segments, mm, 1);
	}
	return addr;

}
EXPORT_SYMBOL_GPL(slice_get_unmapped_area);
unsigned long arch_get_unmapped_area(struct file *filp,
				     unsigned long addr,
				     unsigned long len,
				     unsigned long pgoff,
				     unsigned long flags)
{
	return slice_get_unmapped_area(addr, len, flags,
				       current->mm->context.user_psize, 0);
}
unsigned long arch_get_unmapped_area_topdown(struct file *filp,
					     const unsigned long addr0,
					     const unsigned long len,
					     const unsigned long pgoff,
					     const unsigned long flags)
{
	return slice_get_unmapped_area(addr0, len, flags,
				       current->mm->context.user_psize, 1);
}
unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr)
{
	unsigned char *hpsizes;
	int index, mask_index;

	if (addr < SLICE_LOW_TOP) {
		u64 lpsizes;
		lpsizes = mm->context.low_slices_psize;
		index = GET_LOW_SLICE_INDEX(addr);
		return (lpsizes >> (index * 4)) & 0xf;
	}
	hpsizes = mm->context.high_slices_psize;
	index = GET_HIGH_SLICE_INDEX(addr);
	mask_index = index & 0x1;
	return (hpsizes[index >> 1] >> (mask_index * 4)) & 0xf;
}
EXPORT_SYMBOL_GPL(get_slice_psize);
/*
 * This is called by hash_page when it needs to do a lazy conversion of
 * an address space from real 64K pages to combo 4K pages (typically
 * when hitting a non cacheable mapping on a processor or hypervisor
 * that won't allow them for 64K pages).
 *
 * This is also called in init_new_context() to change back the user
 * psize from whatever the parent context had it set to.
 * N.B. This may be called before mm->context.id has been set.
 *
 * This function will only change the content of the {low,high}_slice_psize
 * masks, it will not flush SLBs as this shall be handled lazily by the
 * caller.
 */
void slice_set_user_psize(struct mm_struct *mm, unsigned int psize)
{
	int index, mask_index;
	unsigned char *hpsizes;
	unsigned long flags, lpsizes;
	unsigned int old_psize;
	int i;

	slice_dbg("slice_set_user_psize(mm=%p, psize=%d)\n", mm, psize);

	spin_lock_irqsave(&slice_convert_lock, flags);

	old_psize = mm->context.user_psize;
	slice_dbg(" old_psize=%d\n", old_psize);
	if (old_psize == psize)
		goto bail;

	mm->context.user_psize = psize;
	wmb();

	lpsizes = mm->context.low_slices_psize;
	for (i = 0; i < SLICE_NUM_LOW; i++)
		if (((lpsizes >> (i * 4)) & 0xf) == old_psize)
			lpsizes = (lpsizes & ~(0xful << (i * 4))) |
				(((unsigned long)psize) << (i * 4));
	/* Assign the value back */
	mm->context.low_slices_psize = lpsizes;

	hpsizes = mm->context.high_slices_psize;
	for (i = 0; i < SLICE_NUM_HIGH; i++) {
		mask_index = i & 0x1;
		index = i >> 1;
		if (((hpsizes[index] >> (mask_index * 4)) & 0xf) == old_psize)
			hpsizes[index] = (hpsizes[index] &
					  ~(0xf << (mask_index * 4))) |
				(((unsigned long)psize) << (mask_index * 4));
	}

	slice_dbg(" lsps=%lx, hsps=%lx\n",
		  mm->context.low_slices_psize,
		  mm->context.high_slices_psize);

 bail:
	spin_unlock_irqrestore(&slice_convert_lock, flags);
}
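
/*
 * Convert an address range to the given page size immediately. Unlike
 * slice_set_user_psize() above, this does not touch the mm's default
 * user_psize; it just rewrites the psize nibbles for the slices that
 * the range covers.
 */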
void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
			   unsigned long len, unsigned int psize)
{
	struct slice_mask mask = slice_range_to_mask(start, len);

	slice_convert(mm, mask, psize);
}
#ifdef CONFIG_HUGETLB_PAGE
/*
 * is_hugepage_only_range() is used by generic code to verify whether
 * a normal mmap mapping (non hugetlbfs) is valid on a given area.
 *
 * until the generic code provides a more generic hook and/or starts
 * calling arch get_unmapped_area for MAP_FIXED (which our implementation
 * here knows how to deal with), we hijack it to keep standard mappings
 * away from hugepage areas.
 *
 * because of that generic code limitation, MAP_FIXED mapping cannot
 * "convert" back a slice with no VMAs to the standard page size, only
 * get_unmapped_area() can. It would be possible to fix it here but I
 * prefer working on fixing the generic code instead.
 *
 * WARNING: This will not work if hugetlbfs isn't enabled since the
 * generic code will redefine that function as 0 in that case. This is
 * ok for now as we only use slices with hugetlbfs enabled. This should
 * be fixed as the generic code gets fixed.
 */
int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
			   unsigned long len)
{
	struct slice_mask mask, available;
	unsigned int psize = mm->context.user_psize;

	mask = slice_range_to_mask(addr, len);
	available = slice_mask_for_size(mm, psize);
#ifdef CONFIG_PPC_64K_PAGES
	/* We need to account for 4k slices too */
	if (psize == MMU_PAGE_64K) {
		struct slice_mask compat_mask;
		compat_mask = slice_mask_for_size(mm, MMU_PAGE_4K);
		or_mask(available, compat_mask);
	}
#endif

#if 0 /* too verbose */
	slice_dbg("is_hugepage_only_range(mm=%p, addr=%lx, len=%lx)\n",
		  mm, addr, len);
	slice_print_mask(" mask", mask);
	slice_print_mask(" available", available);
#endif
	return !slice_check_fit(mask, available);
}
#endif