// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * address space "slices" (meta-segments) support
 *
 * Copyright (C) 2007 Benjamin Herrenschmidt, IBM Corporation.
 *
 * Based on hugetlb implementation
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/hugetlb.h>
#include <linux/sched/mm.h>
#include <linux/security.h>
#include <asm/mman.h>
#include <asm/mmu.h>
#include <asm/copro.h>
#include <asm/hugetlb.h>
#include <asm/mmu_context.h>
static DEFINE_SPINLOCK(slice_convert_lock);
#ifdef DEBUG
int _slice_debug = 1;

static void slice_print_mask(const char *label, const struct slice_mask *mask)
{
	if (!_slice_debug)
		return;
	pr_devel("%s low_slice: %*pbl\n", label,
		 (int)SLICE_NUM_LOW, &mask->low_slices);
	pr_devel("%s high_slice: %*pbl\n", label,
		 (int)SLICE_NUM_HIGH, mask->high_slices);
}
#define slice_dbg(fmt...) do { if (_slice_debug) pr_devel(fmt); } while (0)

#else

static void slice_print_mask(const char *label, const struct slice_mask *mask) {}
#define slice_dbg(fmt...)

#endif
static inline notrace bool slice_addr_is_low(unsigned long addr)
{
	u64 tmp = (u64)addr;

	return tmp < SLICE_LOW_TOP;
}
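/*
 * slice_range_to_mask() converts an address range into the set of low
 * (below SLICE_LOW_TOP) and high slices it touches.  Worked example
 * (editor's sketch, assuming book3s64 hash geometry of 256MB low slices
 * below 4GB): a 512MB range starting at 0x10000000 covers low slices 1
 * and 2, so ret->low_slices becomes 0x6 and the high bitmap stays empty.
 */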
static void slice_range_to_mask(unsigned long start, unsigned long len,
				struct slice_mask *ret)
{
	unsigned long end = start + len - 1;

	ret->low_slices = 0;
	if (SLICE_NUM_HIGH)
		bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);

	if (slice_addr_is_low(start)) {
		unsigned long mend = min(end,
					 (unsigned long)(SLICE_LOW_TOP - 1));

		ret->low_slices = (1u << (GET_LOW_SLICE_INDEX(mend) + 1))
			- (1u << GET_LOW_SLICE_INDEX(start));
	}

	if (SLICE_NUM_HIGH && !slice_addr_is_low(end)) {
		unsigned long start_index = GET_HIGH_SLICE_INDEX(start);
		unsigned long align_end = ALIGN(end, (1UL << SLICE_HIGH_SHIFT));
		unsigned long count = GET_HIGH_SLICE_INDEX(align_end) - start_index;

		bitmap_set(ret->high_slices, start_index, count);
	}
}
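/*
 * The helpers below check whether a slice (or an arbitrary range) already
 * contains a VMA; they are used to build the "free" slice mask and to
 * validate address hints against the current layout.
 */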
static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
			      unsigned long len)
{
	struct vm_area_struct *vma;

	if ((mm_ctx_slb_addr_limit(&mm->context) - len) < addr)
		return 0;
	vma = find_vma(mm, addr);
	return (!vma || (addr + len) <= vm_start_gap(vma));
}
static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
{
	return !slice_area_is_free(mm, slice << SLICE_LOW_SHIFT,
				   1ul << SLICE_LOW_SHIFT);
}
static int slice_high_has_vma(struct mm_struct *mm, unsigned long slice)
{
	unsigned long start = slice << SLICE_HIGH_SHIFT;
	unsigned long end = start + (1ul << SLICE_HIGH_SHIFT);

	/* Hack, so that each address is controlled by exactly one
	 * of the high or low area bitmaps, the first high area starts
	 * at SLICE_LOW_TOP, not at 0.
	 */
	if (start == 0)
		start = (unsigned long)SLICE_LOW_TOP;

	return !slice_area_is_free(mm, start, end - start);
}
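/*
 * Build the mask of slices that contain no VMAs at all, up to high_limit.
 * Such slices are candidates for conversion to a different page size.
 */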
static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret,
				unsigned long high_limit)
{
	unsigned long i;

	ret->low_slices = 0;
	if (SLICE_NUM_HIGH)
		bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);

	for (i = 0; i < SLICE_NUM_LOW; i++)
		if (!slice_low_has_vma(mm, i))
			ret->low_slices |= 1u << i;

	if (slice_addr_is_low(high_limit - 1))
		return;

	for (i = 0; i < GET_HIGH_SLICE_INDEX(high_limit); i++)
		if (!slice_high_has_vma(mm, i))
			__set_bit(i, ret->high_slices);
}
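/*
 * Check that every slice covered by [start, start + len) is marked as
 * available in the given mask.
 */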
static bool slice_check_range_fits(struct mm_struct *mm,
				   const struct slice_mask *available,
				   unsigned long start, unsigned long len)
{
	unsigned long end = start + len - 1;
	u64 low_slices = 0;

	if (slice_addr_is_low(start)) {
		unsigned long mend = min(end,
					 (unsigned long)(SLICE_LOW_TOP - 1));

		low_slices = (1u << (GET_LOW_SLICE_INDEX(mend) + 1))
				- (1u << GET_LOW_SLICE_INDEX(start));
	}
	if ((low_slices & available->low_slices) != low_slices)
		return false;

	if (SLICE_NUM_HIGH && !slice_addr_is_low(end)) {
		unsigned long start_index = GET_HIGH_SLICE_INDEX(start);
		unsigned long align_end = ALIGN(end, (1UL << SLICE_HIGH_SHIFT));
		unsigned long count = GET_HIGH_SLICE_INDEX(align_end) - start_index;
		unsigned long i;

		for (i = start_index; i < start_index + count; i++) {
			if (!test_bit(i, available->high_slices))
				return false;
		}
	}

	return true;
}
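/*
 * IPI callback: if this CPU is currently running the target mm, re-copy
 * the mm's context (including the new slice sizes) into the paca and
 * flush the SLB so the change takes effect immediately.
 */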
static void slice_flush_segments(void *parm)
{
#ifdef CONFIG_PPC64
	struct mm_struct *mm = parm;
	unsigned long flags;

	if (mm != current->active_mm)
		return;

	copy_mm_to_paca(current->active_mm);

	local_irq_save(flags);
	slb_flush_and_restore_bolted();
	local_irq_restore(flags);
#endif
}
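/*
 * slice_convert() rewrites the per-slice page size arrays and the cached
 * slice masks so that every slice selected in @mask uses @psize.  The
 * arrays pack one 4-bit psize per slice, two slices per byte.
 */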
static void slice_convert(struct mm_struct *mm,
			  const struct slice_mask *mask, int psize)
{
	int index, mask_index;
	/* Write the new slice psize bits */
	unsigned char *hpsizes, *lpsizes;
	struct slice_mask *psize_mask, *old_mask;
	unsigned long i, flags;
	int old_psize;

	slice_dbg("slice_convert(mm=%p, psize=%d)\n", mm, psize);
	slice_print_mask(" mask", mask);

	psize_mask = slice_mask_for_size(&mm->context, psize);

	/* We need to use a spinlock here to protect against
	 * concurrent 64k -> 4k demotion ...
	 */
	spin_lock_irqsave(&slice_convert_lock, flags);

	lpsizes = mm_ctx_low_slices(&mm->context);
	for (i = 0; i < SLICE_NUM_LOW; i++) {
		if (!(mask->low_slices & (1u << i)))
			continue;

		mask_index = i & 0x1;
		index = i >> 1;

		/* Update the slice_mask */
		old_psize = (lpsizes[index] >> (mask_index * 4)) & 0xf;
		old_mask = slice_mask_for_size(&mm->context, old_psize);
		old_mask->low_slices &= ~(1u << i);
		psize_mask->low_slices |= 1u << i;

		/* Update the sizes array */
		lpsizes[index] = (lpsizes[index] & ~(0xf << (mask_index * 4))) |
				(((unsigned long)psize) << (mask_index * 4));
	}

	hpsizes = mm_ctx_high_slices(&mm->context);
	for (i = 0; i < GET_HIGH_SLICE_INDEX(mm_ctx_slb_addr_limit(&mm->context)); i++) {
		if (!test_bit(i, mask->high_slices))
			continue;

		mask_index = i & 0x1;
		index = i >> 1;

		/* Update the slice_mask */
		old_psize = (hpsizes[index] >> (mask_index * 4)) & 0xf;
		old_mask = slice_mask_for_size(&mm->context, old_psize);
		__clear_bit(i, old_mask->high_slices);
		__set_bit(i, psize_mask->high_slices);

		/* Update the sizes array */
		hpsizes[index] = (hpsizes[index] & ~(0xf << (mask_index * 4))) |
				(((unsigned long)psize) << (mask_index * 4));
	}

	slice_dbg(" lsps=%lx, hsps=%lx\n",
		  (unsigned long)mm_ctx_low_slices(&mm->context),
		  (unsigned long)mm_ctx_high_slices(&mm->context));

	spin_unlock_irqrestore(&slice_convert_lock, flags);

	copro_flush_all_slbs(mm);
}
/*
 * Compute which slice addr is part of;
 * set *boundary_addr to the start or end boundary of that slice
 * (depending on the 'end' parameter);
 * return a boolean indicating whether the slice is marked as available in
 * the 'available' slice_mask.
 */
static bool slice_scan_available(unsigned long addr,
				 const struct slice_mask *available,
				 int end, unsigned long *boundary_addr)
{
	unsigned long slice;

	if (slice_addr_is_low(addr)) {
		slice = GET_LOW_SLICE_INDEX(addr);
		*boundary_addr = (slice + end) << SLICE_LOW_SHIFT;
		return !!(available->low_slices & (1u << slice));
	} else {
		slice = GET_HIGH_SLICE_INDEX(addr);
		*boundary_addr = (slice + end) ?
			((slice + end) << SLICE_HIGH_SHIFT) : SLICE_LOW_TOP;
		return !!test_bit(slice, available->high_slices);
	}
}
static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
					      unsigned long len,
					      const struct slice_mask *available,
					      int psize, unsigned long high_limit)
{
	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
	unsigned long addr, found, next_end;
	struct vm_unmapped_area_info info;

	info.flags = 0;
	info.length = len;
	info.align_mask = PAGE_MASK & ((1ul << pshift) - 1);
	info.align_offset = 0;

	addr = TASK_UNMAPPED_BASE;
	/*
	 * Check up to the allowed max address for this mmap request
	 */
	while (addr < high_limit) {
		info.low_limit = addr;
		if (!slice_scan_available(addr, available, 1, &addr))
			continue;

 next_slice:
		/*
		 * At this point [info.low_limit; addr) covers
		 * available slices only and ends at a slice boundary.
		 * Check if we need to reduce the range, or if we can
		 * extend it to cover the next available slice.
		 */
		if (addr >= high_limit)
			addr = high_limit;
		else if (slice_scan_available(addr, available, 1, &next_end)) {
			addr = next_end;
			goto next_slice;
		}
		info.high_limit = addr;

		found = vm_unmapped_area(&info);
		if (!(found & ~PAGE_MASK))
			return found;
	}

	return -ENOMEM;
}
static unsigned long slice_find_area_topdown(struct mm_struct *mm,
					     unsigned long len,
					     const struct slice_mask *available,
					     int psize, unsigned long high_limit)
{
	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
	unsigned long addr, found, prev;
	struct vm_unmapped_area_info info;
	unsigned long min_addr = max(PAGE_SIZE, mmap_min_addr);

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.align_mask = PAGE_MASK & ((1ul << pshift) - 1);
	info.align_offset = 0;

	addr = mm->mmap_base;
	/*
	 * If we are trying to allocate above DEFAULT_MAP_WINDOW,
	 * add the difference to mmap_base.
	 * Only apply this for requests whose high_limit is above
	 * DEFAULT_MAP_WINDOW.
	 */
	if (high_limit > DEFAULT_MAP_WINDOW)
		addr += mm_ctx_slb_addr_limit(&mm->context) - DEFAULT_MAP_WINDOW;

	while (addr > min_addr) {
		info.high_limit = addr;
		if (!slice_scan_available(addr - 1, available, 0, &addr))
			continue;

 prev_slice:
		/*
		 * At this point [addr; info.high_limit) covers
		 * available slices only and starts at a slice boundary.
		 * Check if we need to reduce the range, or if we can
		 * extend it to cover the previous available slice.
		 */
		if (addr < min_addr)
			addr = min_addr;
		else if (slice_scan_available(addr - 1, available, 0, &prev)) {
			addr = prev;
			goto prev_slice;
		}
		info.low_limit = addr;

		found = vm_unmapped_area(&info);
		if (!(found & ~PAGE_MASK))
			return found;
	}

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	return slice_find_area_bottomup(mm, len, available, psize, high_limit);
}
static unsigned long slice_find_area(struct mm_struct *mm, unsigned long len,
				     const struct slice_mask *mask, int psize,
				     int topdown, unsigned long high_limit)
{
	if (topdown)
		return slice_find_area_topdown(mm, len, mask, psize, high_limit);
	else
		return slice_find_area_bottomup(mm, len, mask, psize, high_limit);
}
static inline void slice_copy_mask(struct slice_mask *dst,
				   const struct slice_mask *src)
{
	dst->low_slices = src->low_slices;
	if (!SLICE_NUM_HIGH)
		return;
	bitmap_copy(dst->high_slices, src->high_slices, SLICE_NUM_HIGH);
}
static inline void slice_or_mask(struct slice_mask *dst,
				 const struct slice_mask *src1,
				 const struct slice_mask *src2)
{
	dst->low_slices = src1->low_slices | src2->low_slices;
	if (!SLICE_NUM_HIGH)
		return;
	bitmap_or(dst->high_slices, src1->high_slices, src2->high_slices, SLICE_NUM_HIGH);
}
static inline void slice_andnot_mask(struct slice_mask *dst,
				     const struct slice_mask *src1,
				     const struct slice_mask *src2)
{
	dst->low_slices = src1->low_slices & ~src2->low_slices;
	if (!SLICE_NUM_HIGH)
		return;
	bitmap_andnot(dst->high_slices, src1->high_slices, src2->high_slices, SLICE_NUM_HIGH);
}
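/*
 * The early returns in the mask helpers above matter on configurations
 * where SLICE_NUM_HIGH is 0 (no high slices): the high_slices bitmap is
 * then empty and must not be touched.
 */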
#ifdef CONFIG_PPC_64K_PAGES
#define MMU_PAGE_BASE	MMU_PAGE_64K
#else
#define MMU_PAGE_BASE	MMU_PAGE_4K
#endif
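/*
 * MMU_PAGE_BASE is the kernel's base page size.  After converting slices
 * to a page size larger than this, slice_get_unmapped_area() below
 * flushes the segments on all CPUs running the mm.
 */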
unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
				      unsigned long flags, unsigned int psize,
				      int topdown)
{
	struct slice_mask good_mask;
	struct slice_mask potential_mask;
	const struct slice_mask *maskp;
	const struct slice_mask *compat_maskp = NULL;
	int fixed = (flags & MAP_FIXED);
	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
	unsigned long page_size = 1UL << pshift;
	struct mm_struct *mm = current->mm;
	unsigned long newaddr;
	unsigned long high_limit;

	high_limit = DEFAULT_MAP_WINDOW;
	if (addr >= high_limit || (fixed && (addr + len > high_limit)))
		high_limit = TASK_SIZE;

	if (len > high_limit)
		return -ENOMEM;
	if (len & (page_size - 1))
		return -EINVAL;
	if (fixed) {
		if (addr & (page_size - 1))
			return -EINVAL;
		if (addr > high_limit - len)
			return -ENOMEM;
	}

	if (high_limit > mm_ctx_slb_addr_limit(&mm->context)) {
		/*
		 * Increasing the slb_addr_limit does not require
		 * the slice mask cache to be recalculated because it should
		 * be already initialised beyond the old address limit.
		 */
		mm_ctx_set_slb_addr_limit(&mm->context, high_limit);

		on_each_cpu(slice_flush_segments, mm, 1);
	}

	/* Sanity checks */
	BUG_ON(mm->task_size == 0);
	BUG_ON(mm_ctx_slb_addr_limit(&mm->context) == 0);
	VM_BUG_ON(radix_enabled());

	slice_dbg("slice_get_unmapped_area(mm=%p, psize=%d...\n", mm, psize);
	slice_dbg(" addr=%lx, len=%lx, flags=%lx, topdown=%d\n",
		  addr, len, flags, topdown);

	/* If hint, make sure it matches our alignment restrictions */
	if (!fixed && addr) {
		addr = ALIGN(addr, page_size);
		slice_dbg(" aligned addr=%lx\n", addr);
		/* Ignore hint if it's too large or overlaps a VMA */
		if (addr > high_limit - len || addr < mmap_min_addr ||
		    !slice_area_is_free(mm, addr, len))
			addr = 0;
	}

	/* First make up a "good" mask of slices that have the right size
	 * already
	 */
	maskp = slice_mask_for_size(&mm->context, psize);

	/*
	 * Here "good" means slices that are already the right page size,
	 * "compat" means slices that have a compatible page size (i.e.
	 * 4k in a 64k pagesize kernel), and "free" means slices without
	 * any VMAs.
	 *
	 * If MAP_FIXED:
	 *	check if fits in good | compat => OK
	 *	check if fits in good | compat | free => convert free
	 *	else bad
	 * If have hint:
	 *	check if hint fits in good => OK
	 *	check if hint fits in good | free => convert free
	 * Otherwise:
	 *	search in good, found => OK
	 *	search in good | free, found => convert free
	 *	search in good | compat | free, found => convert free.
	 */

	/*
	 * If we support combo pages, we can allow 64k pages in 4k slices.
	 * The mask copies could be avoided in most cases here if we had
	 * a pointer to a good mask for the next code to use.
	 */
	if (IS_ENABLED(CONFIG_PPC_64K_PAGES) && psize == MMU_PAGE_64K) {
		compat_maskp = slice_mask_for_size(&mm->context, MMU_PAGE_4K);
		if (fixed)
			slice_or_mask(&good_mask, maskp, compat_maskp);
		else
			slice_copy_mask(&good_mask, maskp);
	} else {
		slice_copy_mask(&good_mask, maskp);
	}

	slice_print_mask(" good_mask", &good_mask);
	if (compat_maskp)
		slice_print_mask(" compat_mask", compat_maskp);

	/* First check hint if it's valid or if we have MAP_FIXED */
	if (addr != 0 || fixed) {
		/* Check if we fit in the good mask. If we do, we just return,
		 * nothing else to do
		 */
		if (slice_check_range_fits(mm, &good_mask, addr, len)) {
			slice_dbg(" fits good !\n");
			newaddr = addr;
			goto return_addr;
		}
	} else {
		/* Now let's see if we can find something in the existing
		 * slices for that size
		 */
		newaddr = slice_find_area(mm, len, &good_mask,
					  psize, topdown, high_limit);
		if (newaddr != -ENOMEM) {
			/* Found within the good mask, we don't have to setup,
			 * we thus return directly
			 */
			slice_dbg(" found area at 0x%lx\n", newaddr);
			goto return_addr;
		}
	}
	/*
	 * We don't fit in the good mask, check what other slices are
	 * empty and thus can be converted
	 */
	slice_mask_for_free(mm, &potential_mask, high_limit);
	slice_or_mask(&potential_mask, &potential_mask, &good_mask);
	slice_print_mask(" potential", &potential_mask);

	if (addr != 0 || fixed) {
		if (slice_check_range_fits(mm, &potential_mask, addr, len)) {
			slice_dbg(" fits potential !\n");
			newaddr = addr;
			goto convert;
		}
	}

	/* If we have MAP_FIXED and failed the above steps, then error out */
	if (fixed)
		return -EBUSY;

	slice_dbg(" search...\n");

	/* If we had a hint that didn't work out, see if we can fit
	 * anywhere in the good area.
	 */
	if (addr) {
		newaddr = slice_find_area(mm, len, &good_mask,
					  psize, topdown, high_limit);
		if (newaddr != -ENOMEM) {
			slice_dbg(" found area at 0x%lx\n", newaddr);
			goto return_addr;
		}
	}

	/* Now let's see if we can find something in the existing slices
	 * for that size plus free slices
	 */
	newaddr = slice_find_area(mm, len, &potential_mask,
				  psize, topdown, high_limit);

	if (IS_ENABLED(CONFIG_PPC_64K_PAGES) && newaddr == -ENOMEM &&
	    psize == MMU_PAGE_64K) {
		/* retry the search with 4k-page slices included */
		slice_or_mask(&potential_mask, &potential_mask, compat_maskp);
		newaddr = slice_find_area(mm, len, &potential_mask,
					  psize, topdown, high_limit);
	}

	if (newaddr == -ENOMEM)
		return -ENOMEM;

	slice_range_to_mask(newaddr, len, &potential_mask);
	slice_dbg(" found potential area at 0x%lx\n", newaddr);
	slice_print_mask(" mask", &potential_mask);

 convert:
	/*
	 * Try to allocate the context before we do slice convert
	 * so that we handle the context allocation failure gracefully.
	 */
	if (need_extra_context(mm, newaddr)) {
		if (alloc_extended_context(mm, newaddr) < 0)
			return -ENOMEM;
	}

	slice_andnot_mask(&potential_mask, &potential_mask, &good_mask);
	if (compat_maskp && !fixed)
		slice_andnot_mask(&potential_mask, &potential_mask, compat_maskp);
	if (potential_mask.low_slices ||
		(SLICE_NUM_HIGH &&
		 !bitmap_empty(potential_mask.high_slices, SLICE_NUM_HIGH))) {
		slice_convert(mm, &potential_mask, psize);
		if (psize > MMU_PAGE_BASE)
			on_each_cpu(slice_flush_segments, mm, 1);
	}
	return newaddr;

return_addr:
	if (need_extra_context(mm, newaddr)) {
		if (alloc_extended_context(mm, newaddr) < 0)
			return -ENOMEM;
	}
	return newaddr;
}
EXPORT_SYMBOL_GPL(slice_get_unmapped_area);
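/*
 * The arch_get_unmapped_area*() hooks below route the generic mmap path
 * into the slice allocator, using the mm's current user page size and
 * selecting bottom-up or top-down search respectively.
 */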
unsigned long arch_get_unmapped_area(struct file *filp,
				     unsigned long addr,
				     unsigned long len,
				     unsigned long pgoff,
				     unsigned long flags)
{
	return slice_get_unmapped_area(addr, len, flags,
				       mm_ctx_user_psize(&current->mm->context), 0);
}
unsigned long arch_get_unmapped_area_topdown(struct file *filp,
					     const unsigned long addr0,
					     const unsigned long len,
					     const unsigned long pgoff,
					     const unsigned long flags)
{
	return slice_get_unmapped_area(addr0, len, flags,
				       mm_ctx_user_psize(&current->mm->context), 1);
}
unsigned int notrace get_slice_psize(struct mm_struct *mm, unsigned long addr)
{
	unsigned char *psizes;
	int index, mask_index;

	VM_BUG_ON(radix_enabled());

	if (slice_addr_is_low(addr)) {
		psizes = mm_ctx_low_slices(&mm->context);
		index = GET_LOW_SLICE_INDEX(addr);
	} else {
		psizes = mm_ctx_high_slices(&mm->context);
		index = GET_HIGH_SLICE_INDEX(addr);
	}
	mask_index = index & 0x1;
	return (psizes[index >> 1] >> (mask_index * 4)) & 0xf;
}
EXPORT_SYMBOL_GPL(get_slice_psize);
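/*
 * At exec time every slice starts out at the default (base) page size and
 * the slice mask cache is seeded to match; a fork simply inherits the
 * parent's layout.
 */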
void slice_init_new_context_exec(struct mm_struct *mm)
{
	unsigned char *hpsizes, *lpsizes;
	struct slice_mask *mask;
	unsigned int psize = mmu_virtual_psize;

	slice_dbg("slice_init_new_context_exec(mm=%p)\n", mm);

	/*
	 * In the case of exec, use the default limit. In the
	 * case of fork it is just inherited from the mm being
	 * duplicated.
	 */
	mm_ctx_set_slb_addr_limit(&mm->context, SLB_ADDR_LIMIT_DEFAULT);
	mm_ctx_set_user_psize(&mm->context, psize);

	/*
	 * Set all slice psizes to the default.
	 */
	lpsizes = mm_ctx_low_slices(&mm->context);
	memset(lpsizes, (psize << 4) | psize, SLICE_NUM_LOW >> 1);

	hpsizes = mm_ctx_high_slices(&mm->context);
	memset(hpsizes, (psize << 4) | psize, SLICE_NUM_HIGH >> 1);

	/*
	 * Slice mask cache starts zeroed, fill the default size cache.
	 */
	mask = slice_mask_for_size(&mm->context, psize);
	mask->low_slices = ~0UL;
	if (SLICE_NUM_HIGH)
		bitmap_fill(mask->high_slices, SLICE_NUM_HIGH);
}
#ifdef CONFIG_PPC_BOOK3S_64
void slice_setup_new_exec(void)
{
	struct mm_struct *mm = current->mm;

	slice_dbg("slice_setup_new_exec(mm=%p)\n", mm);

	if (!is_32bit_task())
		return;

	mm_ctx_set_slb_addr_limit(&mm->context, DEFAULT_MAP_WINDOW);
}
#endif
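/*
 * Force every slice covered by [start, start + len) to the given page
 * size, without the free/compatible checks performed by
 * slice_get_unmapped_area().
 */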
void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
			   unsigned long len, unsigned int psize)
{
	struct slice_mask mask;

	VM_BUG_ON(radix_enabled());

	slice_range_to_mask(start, len, &mask);
	slice_convert(mm, &mask, psize);
}
#ifdef CONFIG_HUGETLB_PAGE
/*
 * is_hugepage_only_range() is used by generic code to verify whether
 * a normal mmap mapping (non hugetlbfs) is valid on a given area.
 *
 * Until the generic code provides a more generic hook and/or starts
 * calling arch get_unmapped_area for MAP_FIXED (which our implementation
 * here knows how to deal with), we hijack it to keep standard mappings
 * away from us.
 *
 * Because of that generic code limitation, a MAP_FIXED mapping cannot
 * "convert" back a slice with no VMAs to the standard page size, only
 * get_unmapped_area() can. It would be possible to fix it here but I
 * prefer working on fixing the generic code instead.
 *
 * WARNING: This will not work if hugetlbfs isn't enabled since the
 * generic code will redefine that function as 0 in that case. This is ok
 * for now as we only use slices with hugetlbfs enabled. This should
 * be fixed as the generic code gets fixed.
 */
int slice_is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
				 unsigned long len)
{
	const struct slice_mask *maskp;
	unsigned int psize = mm_ctx_user_psize(&mm->context);

	VM_BUG_ON(radix_enabled());

	maskp = slice_mask_for_size(&mm->context, psize);

	/* We need to account for 4k slices too */
	if (IS_ENABLED(CONFIG_PPC_64K_PAGES) && psize == MMU_PAGE_64K) {
		const struct slice_mask *compat_maskp;
		struct slice_mask available;

		compat_maskp = slice_mask_for_size(&mm->context, MMU_PAGE_4K);
		slice_or_mask(&available, maskp, compat_maskp);
		return !slice_check_range_fits(mm, &available, addr, len);
	}

	return !slice_check_range_fits(mm, maskp, addr, len);
}
#endif