// SPDX-License-Identifier: GPL-2.0

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/security.h>
#include <asm/cacheflush.h>
#include <asm/machdep.h>
#include <asm/mman.h>
#include <asm/tlb.h>
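/*
 * TLB flush helpers for radix hugetlb mappings. Each one recovers the
 * huge page size from the hstate behind the VMA's file, converts it to
 * an MMU page-size index with hstate_get_psize(), and hands that to the
 * matching radix__*flush_tlb_*_psize() primitive so the flush carries
 * the right page-size encoding.
 */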
void radix__flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	int psize;
	struct hstate *hstate = hstate_file(vma->vm_file);

	psize = hstate_get_psize(hstate);
	radix__flush_tlb_page_psize(vma->vm_mm, vmaddr, psize);
}
void radix__local_flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	int psize;
	struct hstate *hstate = hstate_file(vma->vm_file);

	psize = hstate_get_psize(hstate);
	radix__local_flush_tlb_page_psize(vma->vm_mm, vmaddr, psize);
}
void radix__flush_hugetlb_tlb_range(struct vm_area_struct *vma, unsigned long start,
				    unsigned long end)
{
	int psize;
	struct hstate *hstate = hstate_file(vma->vm_file);

	psize = hstate_get_psize(hstate);
	radix__flush_tlb_range_psize(vma->vm_mm, start, end, psize);
}
/*
 * A variant of hugetlb_get_unmapped_area() doing a topdown search.
 * FIXME!! should we do as x86 does, or as the non-hugetlb area does?
 * i.e. choose topdown or not based on an mmap_is_legacy() check?
 */
unsigned long
radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
				 unsigned long len, unsigned long pgoff,
				 unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct hstate *h = hstate_file(file);
	int fixed = (flags & MAP_FIXED);
	unsigned long high_limit;
	struct vm_unmapped_area_info info;

	high_limit = DEFAULT_MAP_WINDOW;
	if (addr >= high_limit || (fixed && (addr + len > high_limit)))
		high_limit = TASK_SIZE;

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > high_limit)
		return -ENOMEM;

	if (fixed) {
		if (addr > high_limit - len)
			return -ENOMEM;
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (high_limit - len >= addr && addr >= mmap_min_addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}
	/*
	 * We are always doing a topdown search here. Slice code
	 * does that too.
	 */
	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = max(PAGE_SIZE, mmap_min_addr);
	info.high_limit = mm->mmap_base + (high_limit - DEFAULT_MAP_WINDOW);
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;

	return vm_unmapped_area(&info);
}
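/*
 * Worked example for the align_mask computation above, assuming a 4K
 * base page and a 2M huge page: PAGE_MASK is ~0xfff and
 * ~huge_page_mask(h) is 0x1fffff, so align_mask = 0x1ff000, exactly the
 * address bits vm_unmapped_area() must clear to hand back a 2M-aligned
 * address.
 */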
void radix__huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
					 unsigned long addr, pte_t *ptep,
					 pte_t old_pte, pte_t pte)
{
	struct mm_struct *mm = vma->vm_mm;

	/*
	 * To avoid an NMMU hang while relaxing access, we need to flush
	 * the TLB before we set the new value.
	 */
	if (is_pte_rw_upgrade(pte_val(old_pte), pte_val(pte)) &&
	    (atomic_read(&mm->context.copros) > 0))
		radix__flush_hugetlb_page(vma, addr);

	set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
}
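/*
 * Note: this is the radix commit half of the huge_ptep_modify_prot_start()/
 * huge_ptep_modify_prot_commit() pair. The flush above only matters when a
 * coprocessor context is attached to this mm (mm->context.copros > 0),
 * because the nest MMU (NMMU) can hang if it sees a stale translation while
 * access permissions are being relaxed.
 */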