/*
 *  IBM System z Huge TLB Page Support for Kernel.
 *
 *    Copyright IBM Corp. 2007
 *    Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com>
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>

static inline pmd_t __pte_to_pmd(pte_t pte)
{
	int none, young, prot;
	pmd_t pmd;

	/*
	 * Convert encoding                pte bits        pmd bits
	 *                              .IR...wrdytp    ..R...I...y.
	 * empty                        .10...000000 -> ..0...1...0.
	 * prot-none, clean, old        .11...000001 -> ..0...1...1.
	 * prot-none, clean, young      .11...000101 -> ..1...1...1.
	 * prot-none, dirty, old        .10...001001 -> ..0...1...1.
	 * prot-none, dirty, young      .10...001101 -> ..1...1...1.
	 * read-only, clean, old        .11...010001 -> ..1...1...0.
	 * read-only, clean, young      .01...010101 -> ..1...0...1.
	 * read-only, dirty, old        .11...011001 -> ..1...1...0.
	 * read-only, dirty, young      .01...011101 -> ..1...0...1.
	 * read-write, clean, old       .11...110001 -> ..0...1...0.
	 * read-write, clean, young     .01...110101 -> ..0...0...1.
	 * read-write, dirty, old       .10...111001 -> ..0...1...0.
	 * read-write, dirty, young     .00...111101 -> ..0...0...1.
	 * Huge ptes are dirty by definition, a clean pte is made dirty
	 * by the conversion.
	 */
	if (pte_present(pte)) {
		pmd_val(pmd) = pte_val(pte) & PAGE_MASK;
		if (pte_val(pte) & _PAGE_INVALID)
			pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
		none = (pte_val(pte) & _PAGE_PRESENT) &&
			!(pte_val(pte) & _PAGE_READ) &&
			!(pte_val(pte) & _PAGE_WRITE);
		prot = (pte_val(pte) & _PAGE_PROTECT) &&
			!(pte_val(pte) & _PAGE_WRITE);
		young = pte_val(pte) & _PAGE_YOUNG;
		if (none || young)
			pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
		if (prot || (none && young))
			pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	} else
		pmd_val(pmd) = _SEGMENT_ENTRY_INVALID;
	return pmd;
}
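
/*
 * Illustrative reading of the table above, not additional logic: a
 * read-only, clean, young pte (.01...010101) has _PAGE_PROTECT set and
 * _PAGE_WRITE clear, so prot is true and the resulting pmd gets
 * _SEGMENT_ENTRY_PROTECT; _PAGE_YOUNG carries over as
 * _SEGMENT_ENTRY_YOUNG, yielding the pmd encoding ..1...0...1.
 */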

static inline pte_t __pmd_to_pte(pmd_t pmd)
{
	pte_t pte;

	/*
	 * Convert encoding        pmd bits        pte bits
	 *                      ..R...I...y.    .IR...wrdytp
	 * empty                ..0...1...0. -> .10...000000
	 * prot-none, old       ..0...1...1. -> .10...001001
	 * prot-none, young     ..1...1...1. -> .10...001101
	 * read-only, old       ..1...1...0. -> .11...011001
	 * read-only, young     ..1...0...1. -> .01...011101
	 * read-write, old      ..0...1...0. -> .10...111001
	 * read-write, young    ..0...0...1. -> .00...111101
	 * Huge ptes are dirty by definition
	 */
	if (pmd_present(pmd)) {
		pte_val(pte) = _PAGE_PRESENT | _PAGE_LARGE | _PAGE_DIRTY |
			       (pmd_val(pmd) & PAGE_MASK);
		if (pmd_val(pmd) & _SEGMENT_ENTRY_INVALID)
			pte_val(pte) |= _PAGE_INVALID;
		if (pmd_prot_none(pmd)) {
			if (pmd_val(pmd) & _SEGMENT_ENTRY_PROTECT)
				pte_val(pte) |= _PAGE_YOUNG;
		} else {
			pte_val(pte) |= _PAGE_READ;
			if (pmd_val(pmd) & _SEGMENT_ENTRY_PROTECT)
				pte_val(pte) |= _PAGE_PROTECT;
			else
				pte_val(pte) |= _PAGE_WRITE;
			if (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG)
				pte_val(pte) |= _PAGE_YOUNG;
		}
	} else
		pte_val(pte) = _PAGE_INVALID;
	return pte;
}
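
/*
 * Illustrative reading of the table above: a read-write, young pmd
 * (..0...0...1.) is present, not prot-none and not protected, so the
 * pte gets _PAGE_READ and _PAGE_WRITE plus _PAGE_YOUNG, on top of the
 * unconditional _PAGE_PRESENT | _PAGE_LARGE | _PAGE_DIRTY, matching
 * the .00...111101 encoding; the dirty bit appears because huge ptes
 * are dirty by definition.
 */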

void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t pte)
{
	pmd_t pmd;

	pmd = __pte_to_pmd(pte);
	if (!MACHINE_HAS_HPAGE) {
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_ORIGIN;
		pmd_val(pmd) |= pte_page(pte)[1].index;
	} else
		pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_CO;
	*(pmd_t *) ptep = pmd;
}
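
/*
 * Note on the !MACHINE_HAS_HPAGE path above: without hardware
 * large-page support the segment entry cannot carry the large-page
 * bit, so the pmd origin is pointed at the shadow page table that
 * arch_prepare_hugepage() stored in page[1].index, and the huge page
 * is emulated with PTRS_PER_PTE ordinary ptes.
 */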

pte_t huge_ptep_get(pte_t *ptep)
{
	unsigned long origin;
	pmd_t pmd;

	pmd = *(pmd_t *) ptep;
	if (!MACHINE_HAS_HPAGE && pmd_present(pmd)) {
		origin = pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN;
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_ORIGIN;
		pmd_val(pmd) |= *(unsigned long *) origin;
	}
	return __pmd_to_pte(pmd);
}

pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
			      unsigned long addr, pte_t *ptep)
{
	pmd_t *pmdp = (pmd_t *) ptep;
	pte_t pte = huge_ptep_get(ptep);

	if (MACHINE_HAS_IDTE)
		__pmd_idte(addr, pmdp);
	else
		__pmd_csp(pmdp);
	pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
	return pte;
}
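
/*
 * The flush above uses IDTE (invalidate DAT table entry) where that
 * facility is installed and falls back to CSP (compare and swap and
 * purge) otherwise; both invalidate the segment entry and purge the
 * TLB before the entry is marked empty.
 */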

int arch_prepare_hugepage(struct page *page)
{
	unsigned long addr = page_to_phys(page);
	pte_t pte;
	pte_t *ptep;
	int i;

	if (MACHINE_HAS_HPAGE)
		return 0;

	ptep = (pte_t *) pte_alloc_one(&init_mm, addr);
	if (!ptep)
		return -ENOMEM;

	pte_val(pte) = addr;
	for (i = 0; i < PTRS_PER_PTE; i++) {
		set_pte_at(&init_mm, addr + i * PAGE_SIZE, ptep + i, pte);
		pte_val(pte) += PAGE_SIZE;
	}
	page[1].index = (unsigned long) ptep;
	return 0;
}
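
/*
 * Sketch of the emulation layout built above: the shadow table maps
 * the huge page with PTRS_PER_PTE consecutive ptes, each PAGE_SIZE
 * apart, starting at the physical address of the compound page
 * (256 * 4 KB = 1 MB with the usual s390 values, one segment);
 * page[1].index keeps the pointer that set_huge_pte_at() and
 * arch_release_hugepage() later retrieve.
 */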

void arch_release_hugepage(struct page *page)
{
	pte_t *ptep;

	if (MACHINE_HAS_HPAGE)
		return;

	ptep = (pte_t *) page[1].index;
	if (!ptep)
		return;
	clear_table((unsigned long *) ptep, _PAGE_INVALID,
		    PTRS_PER_PTE * sizeof(pte_t));
	page_table_free(&init_mm, (unsigned long *) ptep);
	page[1].index = 0;
}

pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp = NULL;

	pgdp = pgd_offset(mm, addr);
	pudp = pud_alloc(mm, pgdp, addr);
	if (pudp)
		pmdp = pmd_alloc(mm, pudp, addr);
	return (pte_t *) pmdp;
}

pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp = NULL;

	pgdp = pgd_offset(mm, addr);
	if (pgd_present(*pgdp)) {
		pudp = pud_offset(pgdp, addr);
		if (pud_present(*pudp))
			pmdp = pmd_offset(pudp, addr);
	}
	return (pte_t *) pmdp;
}
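
/*
 * Both lookup helpers above stop at the segment (pmd) level: s390 huge
 * pages are mapped by segment table entries, so the pte_t * handed
 * back to generic hugetlb code is really a pmd_t * in disguise, which
 * is why the accessors in this file cast it back before use.
 */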

int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	return 0;
}

struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
			      int write)
{
	return ERR_PTR(-EINVAL);
}

int pmd_huge(pmd_t pmd)
{
	if (!MACHINE_HAS_HPAGE)
		return 0;

	return !!(pmd_val(pmd) & _SEGMENT_ENTRY_LARGE);
}

int pud_huge(pud_t pud)
{
	return 0;
}

int pmd_huge_support(void)
{
	return 1;
}

struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
			     pmd_t *pmdp, int write)
{
	struct page *page;

	if (!MACHINE_HAS_HPAGE)
		return NULL;

	page = pmd_page(*pmdp);
	if (page)
		page += ((address & ~HPAGE_MASK) >> PAGE_SHIFT);
	return page;
}
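
/*
 * Offset arithmetic in follow_huge_pmd() above: address & ~HPAGE_MASK
 * yields the byte offset within the huge page, and shifting it right
 * by PAGE_SHIFT turns that into an index of base-size subpages, so the
 * returned struct page is the tail page backing the exact address.
 */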