/*
 * address space "slices" (meta-segments) support
 *
 * Copyright (C) 2007 Benjamin Herrenschmidt, IBM Corporation.
 *
 * Based on hugetlb implementation
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */
#undef DEBUG

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/hugetlb.h>
#include <asm/mman.h>
#include <asm/mmu.h>
#include <asm/copro.h>
#include <asm/hugetlb.h>
static DEFINE_SPINLOCK(slice_convert_lock);
/*
 * One bit per slice. We have lower slices which cover 256MB segments
 * up to the 4GB range, which gives us 16 low slices. For the rest we
 * track slices in 1TB size.
 */
struct slice_mask {
	u64 low_slices;
	DECLARE_BITMAP(high_slices, SLICE_NUM_HIGH);
};
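/*
 * Note on the encoding used throughout: each slice's page size is kept
 * as a 4-bit MMU_PAGE_* index. context.low_slices_psize packs one nibble
 * per low slice into a single word, while context.high_slices_psize is a
 * byte array holding two high-slice nibbles per byte.
 */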
#ifdef DEBUG
int _slice_debug = 1;

static void slice_print_mask(const char *label, struct slice_mask mask)
{
	pr_devel("%s low_slice: %*pbl\n", label,
		 (int)SLICE_NUM_LOW, &mask.low_slices);
	pr_devel("%s high_slice: %*pbl\n", label,
		 (int)SLICE_NUM_HIGH, mask.high_slices);
}

#define slice_dbg(fmt...) do { if (_slice_debug) pr_devel(fmt); } while (0)

#else

static void slice_print_mask(const char *label, struct slice_mask mask) {}
#define slice_dbg(fmt...)

#endif
static void slice_range_to_mask(unsigned long start, unsigned long len,
				struct slice_mask *ret)
{
	unsigned long end = start + len - 1;

	ret->low_slices = 0;
	bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);

	if (start < SLICE_LOW_TOP) {
		unsigned long mend = min(end, (SLICE_LOW_TOP - 1));

		ret->low_slices = (1u << (GET_LOW_SLICE_INDEX(mend) + 1))
			- (1u << GET_LOW_SLICE_INDEX(start));
	}

	if ((start + len) > SLICE_LOW_TOP) {
		unsigned long start_index = GET_HIGH_SLICE_INDEX(start);
		unsigned long align_end = ALIGN(end, (1UL << SLICE_HIGH_SHIFT));
		unsigned long count = GET_HIGH_SLICE_INDEX(align_end) - start_index;

		bitmap_set(ret->high_slices, start_index, count);
	}
}
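/*
 * Check whether the range [addr, addr + len) is free: it must fit below
 * the mm's address limit and must not intersect an existing VMA
 * (including the stack guard gap).
 */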
static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
			      unsigned long len)
{
	struct vm_area_struct *vma;

	if ((mm->context.slb_addr_limit - len) < addr)
		return 0;
	vma = find_vma(mm, addr);
	return (!vma || (addr + len) <= vm_start_gap(vma));
}
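/*
 * Report whether a given low (256MB) or high (1TB) slice contains at
 * least one VMA.
 */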
static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
{
	return !slice_area_is_free(mm, slice << SLICE_LOW_SHIFT,
				   1ul << SLICE_LOW_SHIFT);
}
static int slice_high_has_vma(struct mm_struct *mm, unsigned long slice)
{
	unsigned long start = slice << SLICE_HIGH_SHIFT;
	unsigned long end = start + (1ul << SLICE_HIGH_SHIFT);

	/* Hack, so that each address is controlled by exactly one
	 * of the high or low area bitmaps, the first high area starts
	 * at 4GB, not 0 */
	if (start == 0)
		start = SLICE_LOW_TOP;

	return !slice_area_is_free(mm, start, end - start);
}
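/* Build the mask of slices, up to high_limit, that contain no VMA at all. */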
static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret,
				unsigned long high_limit)
{
	unsigned long i;

	ret->low_slices = 0;
	bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);

	for (i = 0; i < SLICE_NUM_LOW; i++)
		if (!slice_low_has_vma(mm, i))
			ret->low_slices |= 1u << i;

	if (high_limit <= SLICE_LOW_TOP)
		return;

	for (i = 0; i < GET_HIGH_SLICE_INDEX(high_limit); i++)
		if (!slice_high_has_vma(mm, i))
			__set_bit(i, ret->high_slices);
}
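/*
 * Build the mask of slices, up to high_limit, whose current page size
 * equals psize, by decoding the per-slice psize nibbles.
 */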
static void slice_mask_for_size(struct mm_struct *mm, int psize,
				struct slice_mask *ret,
				unsigned long high_limit)
{
	unsigned char *hpsizes;
	int index, mask_index;
	unsigned long i;
	u64 lpsizes;

	ret->low_slices = 0;
	bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);

	lpsizes = mm->context.low_slices_psize;
	for (i = 0; i < SLICE_NUM_LOW; i++)
		if (((lpsizes >> (i * 4)) & 0xf) == psize)
			ret->low_slices |= 1u << i;

	if (high_limit <= SLICE_LOW_TOP)
		return;

	hpsizes = mm->context.high_slices_psize;
	for (i = 0; i < GET_HIGH_SLICE_INDEX(high_limit); i++) {
		mask_index = i & 0x1;
		index = i >> 1;
		if (((hpsizes[index] >> (mask_index * 4)) & 0xf) == psize)
			__set_bit(i, ret->high_slices);
	}
}
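/* Return true if every slice set in 'mask' is also set in 'available'. */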
static int slice_check_fit(struct mm_struct *mm,
			   struct slice_mask mask, struct slice_mask available)
{
	DECLARE_BITMAP(result, SLICE_NUM_HIGH);
	/*
	 * Make sure we only do the bit compare up to the max
	 * addr limit, not the full bitmap size.
	 */
	unsigned long slice_count = GET_HIGH_SLICE_INDEX(mm->context.slb_addr_limit);

	bitmap_and(result, mask.high_slices,
		   available.high_slices, slice_count);

	return (mask.low_slices & available.low_slices) == mask.low_slices &&
		bitmap_equal(result, mask.high_slices, slice_count);
}
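/*
 * IPI handler: if the target mm is active on this CPU, refresh the PACA
 * copy of its slice state and flush/rebolt the SLB so the new layout
 * takes effect immediately.
 */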
static void slice_flush_segments(void *parm)
{
	struct mm_struct *mm = parm;
	unsigned long flags;

	if (mm != current->active_mm)
		return;

	copy_mm_to_paca(current->active_mm);

	local_irq_save(flags);
	slb_flush_and_rebolt();
	local_irq_restore(flags);
}
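/*
 * Rewrite the psize nibble of every slice set in 'mask' to 'psize',
 * under slice_convert_lock. Coprocessor SLBs are flushed here; flushing
 * the CPU SLBs is left to the caller.
 */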
static void slice_convert(struct mm_struct *mm, struct slice_mask mask, int psize)
{
	int index, mask_index;
	/* Write the new slice psize bits */
	unsigned char *hpsizes;
	u64 lpsizes;
	unsigned long i, flags;

	slice_dbg("slice_convert(mm=%p, psize=%d)\n", mm, psize);
	slice_print_mask(" mask", mask);

	/* We need to use a spinlock here to protect against
	 * concurrent 64k -> 4k demotion ...
	 */
	spin_lock_irqsave(&slice_convert_lock, flags);

	lpsizes = mm->context.low_slices_psize;
	for (i = 0; i < SLICE_NUM_LOW; i++)
		if (mask.low_slices & (1u << i))
			lpsizes = (lpsizes & ~(0xful << (i * 4))) |
				(((unsigned long)psize) << (i * 4));

	/* Assign the value back */
	mm->context.low_slices_psize = lpsizes;

	hpsizes = mm->context.high_slices_psize;
	for (i = 0; i < GET_HIGH_SLICE_INDEX(mm->context.slb_addr_limit); i++) {
		mask_index = i & 0x1;
		index = i >> 1;
		if (test_bit(i, mask.high_slices))
			hpsizes[index] = (hpsizes[index] &
					  ~(0xf << (mask_index * 4))) |
				(((unsigned long)psize) << (mask_index * 4));
	}

	slice_dbg(" lsps=%lx, hsps=%lx\n",
		  (unsigned long)mm->context.low_slices_psize,
		  (unsigned long)mm->context.high_slices_psize);

	spin_unlock_irqrestore(&slice_convert_lock, flags);

	copro_flush_all_slbs(mm);
}
/*
 * Compute which slice addr is part of;
 * set *boundary_addr to the start or end boundary of that slice
 * (depending on the 'end' parameter);
 * return a boolean indicating whether the slice is marked as available
 * in the 'available' slice_mask.
 */
static bool slice_scan_available(unsigned long addr,
				 struct slice_mask available,
				 int end,
				 unsigned long *boundary_addr)
{
	unsigned long slice;

	if (addr < SLICE_LOW_TOP) {
		slice = GET_LOW_SLICE_INDEX(addr);
		*boundary_addr = (slice + end) << SLICE_LOW_SHIFT;
		return !!(available.low_slices & (1u << slice));
	} else {
		slice = GET_HIGH_SLICE_INDEX(addr);
		*boundary_addr = (slice + end) ?
			((slice + end) << SLICE_HIGH_SHIFT) : SLICE_LOW_TOP;
		return !!test_bit(slice, available.high_slices);
	}
}
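/*
 * Bottom-up search: walk upward from TASK_UNMAPPED_BASE, coalescing runs
 * of adjacent available slices into a single [low_limit, high_limit)
 * window and asking vm_unmapped_area() for a fit in each window.
 */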
static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
					      unsigned long len,
					      struct slice_mask available,
					      int psize, unsigned long high_limit)
{
	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
	unsigned long addr, found, next_end;
	struct vm_unmapped_area_info info;

	info.flags = 0;
	info.length = len;
	info.align_mask = PAGE_MASK & ((1ul << pshift) - 1);
	info.align_offset = 0;

	addr = TASK_UNMAPPED_BASE;
	/*
	 * Check till the allowed max value for this mmap request
	 */
	while (addr < high_limit) {
		info.low_limit = addr;
		if (!slice_scan_available(addr, available, 1, &addr))
			continue;

 next_slice:
		/*
		 * At this point [info.low_limit; addr) covers
		 * available slices only and ends at a slice boundary.
		 * Check if we need to reduce the range, or if we can
		 * extend it to cover the next available slice.
		 */
		if (addr >= high_limit)
			addr = high_limit;
		else if (slice_scan_available(addr, available, 1, &next_end)) {
			addr = next_end;
			goto next_slice;
		}
		info.high_limit = addr;

		found = vm_unmapped_area(&info);
		if (!(found & ~PAGE_MASK))
			return found;
	}

	return -ENOMEM;
}
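/*
 * Top-down variant: walk downward from mmap_base, coalescing runs of
 * available slices, and fall back to the bottom-up search if nothing
 * fits.
 */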
static unsigned long slice_find_area_topdown(struct mm_struct *mm,
					     unsigned long len,
					     struct slice_mask available,
					     int psize, unsigned long high_limit)
{
	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
	unsigned long addr, found, prev;
	struct vm_unmapped_area_info info;

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.align_mask = PAGE_MASK & ((1ul << pshift) - 1);
	info.align_offset = 0;

	addr = mm->mmap_base;
	/*
	 * If we are trying to allocate above DEFAULT_MAP_WINDOW,
	 * add the difference to mmap_base. This should only be
	 * applied to requests whose high_limit is above
	 * DEFAULT_MAP_WINDOW.
	 */
	if (high_limit > DEFAULT_MAP_WINDOW)
		addr += mm->context.slb_addr_limit - DEFAULT_MAP_WINDOW;

	while (addr > PAGE_SIZE) {
		info.high_limit = addr;
		if (!slice_scan_available(addr - 1, available, 0, &addr))
			continue;

 prev_slice:
		/*
		 * At this point [addr; info.high_limit) covers
		 * available slices only and starts at a slice boundary.
		 * Check if we need to reduce the range, or if we can
		 * extend it to cover the previous available slice.
		 */
		if (addr < PAGE_SIZE)
			addr = PAGE_SIZE;
		else if (slice_scan_available(addr - 1, available, 0, &prev)) {
			addr = prev;
			goto prev_slice;
		}
		info.low_limit = addr;

		found = vm_unmapped_area(&info);
		if (!(found & ~PAGE_MASK))
			return found;
	}

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	return slice_find_area_bottomup(mm, len, available, psize, high_limit);
}
static unsigned long slice_find_area(struct mm_struct *mm, unsigned long len,
				     struct slice_mask mask, int psize,
				     int topdown, unsigned long high_limit)
{
	if (topdown)
		return slice_find_area_topdown(mm, len, mask, psize, high_limit);
	else
		return slice_find_area_bottomup(mm, len, mask, psize, high_limit);
}
static inline void slice_or_mask(struct slice_mask *dst, struct slice_mask *src)
{
	DECLARE_BITMAP(result, SLICE_NUM_HIGH);

	dst->low_slices |= src->low_slices;
	bitmap_or(result, dst->high_slices, src->high_slices, SLICE_NUM_HIGH);
	bitmap_copy(dst->high_slices, result, SLICE_NUM_HIGH);
}
static inline void slice_andnot_mask(struct slice_mask *dst, struct slice_mask *src)
{
	DECLARE_BITMAP(result, SLICE_NUM_HIGH);

	dst->low_slices &= ~src->low_slices;
	bitmap_andnot(result, dst->high_slices, src->high_slices, SLICE_NUM_HIGH);
	bitmap_copy(dst->high_slices, result, SLICE_NUM_HIGH);
}
#ifdef CONFIG_PPC_64K_PAGES
#define MMU_PAGE_BASE	MMU_PAGE_64K
#else
#define MMU_PAGE_BASE	MMU_PAGE_4K
#endif
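/*
 * Find an unmapped area for a mapping of 'len' bytes at page size
 * 'psize', honouring a hint address and MAP_FIXED in 'flags'. Returns
 * the chosen address or a negative errno; any free slices picked up by
 * the search are converted to 'psize' before returning.
 */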
unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
				      unsigned long flags, unsigned int psize,
				      int topdown)
{
	struct slice_mask mask;
	struct slice_mask good_mask;
	struct slice_mask potential_mask;
	struct slice_mask compat_mask;
	int fixed = (flags & MAP_FIXED);
	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
	unsigned long page_size = 1UL << pshift;
	struct mm_struct *mm = current->mm;
	unsigned long newaddr;
	unsigned long high_limit;

	high_limit = DEFAULT_MAP_WINDOW;
	if (addr >= high_limit || (fixed && (addr + len > high_limit)))
		high_limit = TASK_SIZE;

	if (len > high_limit)
		return -ENOMEM;
	if (len & (page_size - 1))
		return -EINVAL;
	if (fixed) {
		if (addr & (page_size - 1))
			return -EINVAL;
		if (addr > high_limit - len)
			return -ENOMEM;
	}

	if (high_limit > mm->context.slb_addr_limit) {
		mm->context.slb_addr_limit = high_limit;
		on_each_cpu(slice_flush_segments, mm, 1);
	}

	/*
	 * init different masks
	 */
	mask.low_slices = 0;
	bitmap_zero(mask.high_slices, SLICE_NUM_HIGH);

	/* silence stupid warning */;
	potential_mask.low_slices = 0;
	bitmap_zero(potential_mask.high_slices, SLICE_NUM_HIGH);

	compat_mask.low_slices = 0;
	bitmap_zero(compat_mask.high_slices, SLICE_NUM_HIGH);

	/* Sanity checks */
	BUG_ON(mm->task_size == 0);
	BUG_ON(mm->context.slb_addr_limit == 0);
	VM_BUG_ON(radix_enabled());

	slice_dbg("slice_get_unmapped_area(mm=%p, psize=%d...\n", mm, psize);
	slice_dbg(" addr=%lx, len=%lx, flags=%lx, topdown=%d\n",
		  addr, len, flags, topdown);

	/* If hint, make sure it matches our alignment restrictions */
	if (!fixed && addr) {
		addr = _ALIGN_UP(addr, page_size);
		slice_dbg(" aligned addr=%lx\n", addr);
		/* Ignore hint if it's too large or overlaps a VMA */
		if (addr > high_limit - len ||
		    !slice_area_is_free(mm, addr, len))
			addr = 0;
	}

	/* First make up a "good" mask of slices that have the right size
	 * already
	 */
	slice_mask_for_size(mm, psize, &good_mask, high_limit);
	slice_print_mask(" good_mask", good_mask);

	/*
	 * Here "good" means slices that are already the right page size,
	 * "compat" means slices that have a compatible page size (i.e.
	 * 4k in a 64k pagesize kernel), and "free" means slices without
	 * any VMAs.
	 *
	 * If MAP_FIXED:
	 *	check if fits in good | compat => OK
	 *	check if fits in good | compat | free => convert free
	 *	else bad
	 * If have hint:
	 *	check if hint fits in good => OK
	 *	check if hint fits in good | free => convert free
	 * Otherwise:
	 *	search in good, found => OK
	 *	search in good | free, found => convert free
	 *	search in good | compat | free, found => convert free.
	 */

#ifdef CONFIG_PPC_64K_PAGES
	/* If we support combo pages, we can allow 64k pages in 4k slices */
	if (psize == MMU_PAGE_64K) {
		slice_mask_for_size(mm, MMU_PAGE_4K, &compat_mask, high_limit);
		if (fixed)
			slice_or_mask(&good_mask, &compat_mask);
	}
#endif

	/* First check hint if it's valid or if we have MAP_FIXED */
	if (addr != 0 || fixed) {
		/* Build a mask for the requested range */
		slice_range_to_mask(addr, len, &mask);
		slice_print_mask(" mask", mask);

		/* Check if we fit in the good mask. If we do, we just return,
		 * nothing else to do
		 */
		if (slice_check_fit(mm, mask, good_mask)) {
			slice_dbg(" fits good !\n");
			return addr;
		}
	} else {
		/* Now let's see if we can find something in the existing
		 * slices for that size
		 */
		newaddr = slice_find_area(mm, len, good_mask,
					  psize, topdown, high_limit);
		if (newaddr != -ENOMEM) {
			/* Found within the good mask, we don't have to setup,
			 * we thus return directly
			 */
			slice_dbg(" found area at 0x%lx\n", newaddr);
			return newaddr;
		}
	}

	/* We don't fit in the good mask, check what other slices are
	 * empty and thus can be converted
	 */
	slice_mask_for_free(mm, &potential_mask, high_limit);
	slice_or_mask(&potential_mask, &good_mask);
	slice_print_mask(" potential", potential_mask);

	if ((addr != 0 || fixed) && slice_check_fit(mm, mask, potential_mask)) {
		slice_dbg(" fits potential !\n");
		goto convert;
	}

	/* If we have MAP_FIXED and failed the above steps, then error out */
	if (fixed)
		return -EBUSY;

	slice_dbg(" search...\n");

	/* If we had a hint that didn't work out, see if we can fit
	 * anywhere in the good area.
	 */
	if (addr) {
		addr = slice_find_area(mm, len, good_mask,
				       psize, topdown, high_limit);
		if (addr != -ENOMEM) {
			slice_dbg(" found area at 0x%lx\n", addr);
			return addr;
		}
	}

	/* Now let's see if we can find something in the existing slices
	 * for that size plus free slices
	 */
	addr = slice_find_area(mm, len, potential_mask,
			       psize, topdown, high_limit);

#ifdef CONFIG_PPC_64K_PAGES
	if (addr == -ENOMEM && psize == MMU_PAGE_64K) {
		/* retry the search with 4k-page slices included */
		slice_or_mask(&potential_mask, &compat_mask);
		addr = slice_find_area(mm, len, potential_mask,
				       psize, topdown, high_limit);
	}
#endif

	if (addr == -ENOMEM)
		return -ENOMEM;

	slice_range_to_mask(addr, len, &mask);
	slice_dbg(" found potential area at 0x%lx\n", addr);
	slice_print_mask(" mask", mask);

 convert:
	slice_andnot_mask(&mask, &good_mask);
	slice_andnot_mask(&mask, &compat_mask);
	if (mask.low_slices || !bitmap_empty(mask.high_slices, SLICE_NUM_HIGH)) {
		slice_convert(mm, mask, psize);
		if (psize > MMU_PAGE_BASE)
			on_each_cpu(slice_flush_segments, mm, 1);
	}
	return addr;

}
EXPORT_SYMBOL_GPL(slice_get_unmapped_area);
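/*
 * Generic mmap entry points: both use the mm's base user page size and
 * delegate to slice_get_unmapped_area(), bottom-up or top-down. Callers
 * that want a different page size (e.g. the hugetlb code) call
 * slice_get_unmapped_area() directly with their own psize.
 */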
unsigned long arch_get_unmapped_area(struct file *filp,
				     unsigned long addr,
				     unsigned long len,
				     unsigned long pgoff,
				     unsigned long flags)
{
	return slice_get_unmapped_area(addr, len, flags,
				       current->mm->context.user_psize, 0);
}
unsigned long arch_get_unmapped_area_topdown(struct file *filp,
					     const unsigned long addr0,
					     const unsigned long len,
					     const unsigned long pgoff,
					     const unsigned long flags)
{
	return slice_get_unmapped_area(addr0, len, flags,
				       current->mm->context.user_psize, 1);
}
unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr)
{
	unsigned char *hpsizes;
	int index, mask_index;

	/*
	 * Radix doesn't use slices, but can get enabled along with MMU_SLICE
	 */
	if (radix_enabled()) {
#ifdef CONFIG_PPC_64K_PAGES
		return MMU_PAGE_64K;
#else
		return MMU_PAGE_4K;
#endif
	}
	if (addr < SLICE_LOW_TOP) {
		u64 lpsizes;

		lpsizes = mm->context.low_slices_psize;
		index = GET_LOW_SLICE_INDEX(addr);
		return (lpsizes >> (index * 4)) & 0xf;
	}
	hpsizes = mm->context.high_slices_psize;
	index = GET_HIGH_SLICE_INDEX(addr);
	mask_index = index & 0x1;
	return (hpsizes[index >> 1] >> (mask_index * 4)) & 0xf;
}
EXPORT_SYMBOL_GPL(get_slice_psize);
/*
 * This is called by hash_page when it needs to do a lazy conversion of
 * an address space from real 64K pages to combo 4K pages (typically
 * when hitting a non cacheable mapping on a processor or hypervisor
 * that won't allow them for 64K pages).
 *
 * This is also called in init_new_context() to change back the user
 * psize from whatever the parent context had it set to.
 * N.B. This may be called before mm->context.id has been set.
 *
 * This function will only change the content of the {low,high}_slice_psize
 * masks, it will not flush SLBs as this shall be handled lazily by the
 * caller.
 */
void slice_set_user_psize(struct mm_struct *mm, unsigned int psize)
{
	int index, mask_index;
	unsigned char *hpsizes;
	unsigned long flags, lpsizes;
	unsigned int old_psize;
	int i;

	slice_dbg("slice_set_user_psize(mm=%p, psize=%d)\n", mm, psize);

	VM_BUG_ON(radix_enabled());
	spin_lock_irqsave(&slice_convert_lock, flags);

	old_psize = mm->context.user_psize;
	slice_dbg(" old_psize=%d\n", old_psize);
	if (old_psize == psize)
		goto bail;

	mm->context.user_psize = psize;
	wmb();

	lpsizes = mm->context.low_slices_psize;
	for (i = 0; i < SLICE_NUM_LOW; i++)
		if (((lpsizes >> (i * 4)) & 0xf) == old_psize)
			lpsizes = (lpsizes & ~(0xful << (i * 4))) |
				(((unsigned long)psize) << (i * 4));
	/* Assign the value back */
	mm->context.low_slices_psize = lpsizes;

	hpsizes = mm->context.high_slices_psize;
	for (i = 0; i < SLICE_NUM_HIGH; i++) {
		mask_index = i & 0x1;
		index = i >> 1;
		if (((hpsizes[index] >> (mask_index * 4)) & 0xf) == old_psize)
			hpsizes[index] = (hpsizes[index] &
					  ~(0xf << (mask_index * 4))) |
				(((unsigned long)psize) << (mask_index * 4));
	}

	slice_dbg(" lsps=%lx, hsps=%lx\n",
		  (unsigned long)mm->context.low_slices_psize,
		  (unsigned long)mm->context.high_slices_psize);

 bail:
	spin_unlock_irqrestore(&slice_convert_lock, flags);
}
void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
			   unsigned long len, unsigned int psize)
{
	struct slice_mask mask;

	VM_BUG_ON(radix_enabled());

	slice_range_to_mask(start, len, &mask);
	slice_convert(mm, mask, psize);
}
#ifdef CONFIG_HUGETLB_PAGE
/*
 * is_hugepage_only_range() is used by generic code to verify whether
 * a normal mmap mapping (non hugetlbfs) is valid on a given area.
 *
 * Until the generic code provides a more generic hook and/or starts
 * calling arch get_unmapped_area for MAP_FIXED (which our implementation
 * here knows how to deal with), we hijack it to keep standard mappings
 * away from hugepage ones.
 *
 * Because of that generic code limitation, MAP_FIXED mappings cannot
 * "convert" back a slice with no VMAs to the standard page size, only
 * get_unmapped_area() can. It would be possible to fix it here but I
 * prefer working on fixing the generic code instead.
 *
 * WARNING: This will not work if hugetlbfs isn't enabled since the
 * generic code will redefine that function as 0 in that case. This is
 * ok for now as we only use slices with hugetlbfs enabled. This should
 * be fixed as the generic code gets fixed.
 */
int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
			   unsigned long len)
{
	struct slice_mask mask, available;
	unsigned int psize = mm->context.user_psize;
	unsigned long high_limit = mm->context.slb_addr_limit;

	if (radix_enabled())
		return 0;

	slice_range_to_mask(addr, len, &mask);
	slice_mask_for_size(mm, psize, &available, high_limit);
#ifdef CONFIG_PPC_64K_PAGES
	/* We need to account for 4k slices too */
	if (psize == MMU_PAGE_64K) {
		struct slice_mask compat_mask;

		slice_mask_for_size(mm, MMU_PAGE_4K, &compat_mask, high_limit);
		slice_or_mask(&available, &compat_mask);
	}
#endif

#if 0 /* too verbose */
	slice_dbg("is_hugepage_only_range(mm=%p, addr=%lx, len=%lx)\n",
		  mm, addr, len);
	slice_print_mask(" mask", mask);
	slice_print_mask(" available", available);
#endif
	return !slice_check_fit(mm, mask, available);
}
#endif
);