Avoid beyond bounds copy while caching ACL
[zen-stable.git] / arch / s390 / mm / hugetlbpage.c
blob597bb2d27c3c0dbad5e5115396c5df483144d9d3
/*
 *  IBM System z Huge TLB Page Support for Kernel.
 *
 *    Copyright 2007 IBM Corp.
 *    Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com>
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
12 void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
13 pte_t *pteptr, pte_t pteval)
15 pmd_t *pmdp = (pmd_t *) pteptr;
16 unsigned long mask;
18 if (!MACHINE_HAS_HPAGE) {
19 pteptr = (pte_t *) pte_page(pteval)[1].index;
20 mask = pte_val(pteval) &
21 (_SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO);
22 pte_val(pteval) = (_SEGMENT_ENTRY + __pa(pteptr)) | mask;
25 pmd_val(*pmdp) = pte_val(pteval);
28 int arch_prepare_hugepage(struct page *page)
30 unsigned long addr = page_to_phys(page);
31 pte_t pte;
32 pte_t *ptep;
33 int i;
35 if (MACHINE_HAS_HPAGE)
36 return 0;
38 ptep = (pte_t *) pte_alloc_one(&init_mm, addr);
39 if (!ptep)
40 return -ENOMEM;
42 pte = mk_pte(page, PAGE_RW);
43 for (i = 0; i < PTRS_PER_PTE; i++) {
44 set_pte_at(&init_mm, addr + i * PAGE_SIZE, ptep + i, pte);
45 pte_val(pte) += PAGE_SIZE;
47 page[1].index = (unsigned long) ptep;
48 return 0;
51 void arch_release_hugepage(struct page *page)
53 pte_t *ptep;
55 if (MACHINE_HAS_HPAGE)
56 return;
58 ptep = (pte_t *) page[1].index;
59 if (!ptep)
60 return;
61 page_table_free(&init_mm, (unsigned long *) ptep);
62 page[1].index = 0;
65 pte_t *huge_pte_alloc(struct mm_struct *mm,
66 unsigned long addr, unsigned long sz)
68 pgd_t *pgdp;
69 pud_t *pudp;
70 pmd_t *pmdp = NULL;
72 pgdp = pgd_offset(mm, addr);
73 pudp = pud_alloc(mm, pgdp, addr);
74 if (pudp)
75 pmdp = pmd_alloc(mm, pudp, addr);
76 return (pte_t *) pmdp;
79 pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
81 pgd_t *pgdp;
82 pud_t *pudp;
83 pmd_t *pmdp = NULL;
85 pgdp = pgd_offset(mm, addr);
86 if (pgd_present(*pgdp)) {
87 pudp = pud_offset(pgdp, addr);
88 if (pud_present(*pudp))
89 pmdp = pmd_offset(pudp, addr);
91 return (pte_t *) pmdp;
94 int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
96 return 0;
99 struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
100 int write)
102 return ERR_PTR(-EINVAL);
105 int pmd_huge(pmd_t pmd)
107 if (!MACHINE_HAS_HPAGE)
108 return 0;
110 return !!(pmd_val(pmd) & _SEGMENT_ENTRY_LARGE);
113 int pud_huge(pud_t pud)
115 return 0;
118 struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
119 pmd_t *pmdp, int write)
121 struct page *page;
123 if (!MACHINE_HAS_HPAGE)
124 return NULL;
126 page = pmd_page(*pmdp);
127 if (page)
128 page += ((address & ~HPAGE_MASK) >> PAGE_SHIFT);
129 return page;