#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/machdep.h>
#include <asm/mman.h>
#include <asm/tlb.h>
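
/*
 * Flush the TLB entry for the hugetlb page at @vmaddr, using the page
 * size of the backing hstate rather than the base page size.
 */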
void radix__flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
        int psize;
        struct hstate *hstate = hstate_file(vma->vm_file);

        psize = hstate_get_psize(hstate);
        radix__flush_tlb_page_psize(vma->vm_mm, vmaddr, psize);
}
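
/*
 * As above, but flush only the local CPU's TLB.
 */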
void radix__local_flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
        int psize;
        struct hstate *hstate = hstate_file(vma->vm_file);

        psize = hstate_get_psize(hstate);
        radix__local_flush_tlb_page_psize(vma->vm_mm, vmaddr, psize);
}
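
/*
 * Flush a virtual address range of hugetlb mappings, again using the
 * hstate's page size.
 */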
void radix__flush_hugetlb_tlb_range(struct vm_area_struct *vma, unsigned long start,
                                    unsigned long end)
{
        int psize;
        struct hstate *hstate = hstate_file(vma->vm_file);

        psize = hstate_get_psize(hstate);
        radix__flush_tlb_range_psize(vma->vm_mm, start, end, psize);
}

/*
 * A variant of hugetlb_get_unmapped_area doing a topdown search.
 * FIXME!! should we do as x86 does or as the non-hugetlb area does?
 * i.e., use topdown or not based on an mmap_is_legacy check?
 */
unsigned long
radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                                 unsigned long len, unsigned long pgoff,
                                 unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        struct hstate *h = hstate_file(file);
        struct vm_unmapped_area_info info;
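
        /*
         * The mapping length must be a multiple of the huge page size
         * and must fit in the address space.
         */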
        if (len & ~huge_page_mask(h))
                return -EINVAL;
        if (len > TASK_SIZE)
                return -ENOMEM;
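
        /* For MAP_FIXED, only validate the requested range. */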
        if (flags & MAP_FIXED) {
                if (prepare_hugepage_range(file, addr, len))
                        return -EINVAL;
                return addr;
        }
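
        /*
         * If the caller gave an address hint, try it: align it up to the
         * huge page size and use it if it leaves enough room and does not
         * overlap an existing VMA.
         */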
        if (addr) {
                addr = ALIGN(addr, huge_page_size(h));
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
                    (!vma || addr + len <= vm_start_gap(vma)))
                        return addr;
        }

        /*
         * We are always doing a topdown search here. Slice code
         * does that too.
         */
        info.flags = VM_UNMAPPED_AREA_TOPDOWN;
        info.length = len;
        info.low_limit = PAGE_SIZE;
        info.high_limit = current->mm->mmap_base;
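        /* Keep the returned address aligned to the huge page size. */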
        info.align_mask = PAGE_MASK & ~huge_page_mask(h);
        info.align_offset = 0;
        return vm_unmapped_area(&info);
}