/*
 * Copyright 2007-2008 Paul Mackerras, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>

#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
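
/*
 * Layout of the protection map, as inferred from the indexing used in this
 * file (a reader's sketch, not an authoritative description; the actual
 * definitions of struct subpage_prot_table and the SBP_* constants live in
 * the hash MMU headers):
 *
 *  - The lowest level is a page of u32s, one u32 of protection bits per
 *    64k page, indexed by (addr >> PAGE_SHIFT) & (SBP_L1_COUNT - 1).
 *  - Those pages are reached through arrays of pointers indexed by
 *    (addr >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1).
 *  - For addresses below 4GB the pointer array is the fixed spt->low_prot[];
 *    higher addresses go through a page of pointers found (and allocated on
 *    demand in sys_subpage_prot()) via spt->protptrs[addr >> SBP_L3_SHIFT].
 */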

/*
 * Free all pages allocated for subpage protection maps and pointers.
 * Also makes sure that the subpage_prot_table structure is
 * reinitialized for the next user.
 */
void subpage_prot_free(struct mm_struct *mm)
{
	struct subpage_prot_table *spt = &mm->context.spt;
	unsigned long i, j, addr;
	u32 **p;

	/* Free the protection-word pages for addresses below 4GB. */
	for (i = 0; i < 4; ++i) {
		if (spt->low_prot[i]) {
			free_page((unsigned long)spt->low_prot[i]);
			spt->low_prot[i] = NULL;
		}
	}

	/* Free the second-level pointer pages and the pages they point to. */
	addr = 0;
	for (i = 0; i < 2; ++i) {
		p = spt->protptrs[i];
		if (!p)
			continue;
		spt->protptrs[i] = NULL;
		for (j = 0; j < SBP_L2_COUNT && addr < spt->maxaddr;
		     ++j, addr += PAGE_SIZE)
			if (p[j])
				free_page((unsigned long)p[j]);
		free_page((unsigned long)p);
	}
	spt->maxaddr = 0;
}

void subpage_prot_init_new_context(struct mm_struct *mm)
{
	struct subpage_prot_table *spt = &mm->context.spt;

	memset(spt, 0, sizeof(*spt));
}

/*
 * Flush any existing HPTEs for npages pages starting at addr, so that
 * they will be refaulted with the updated subpage protection.
 */
static void hpte_flush_range(struct mm_struct *mm, unsigned long addr,
			     int npages)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;

	pgd = pgd_offset(mm, addr);
	if (pgd_none(*pgd))
		return;
	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return;
	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	arch_enter_lazy_mmu_mode();
	for (; npages > 0; --npages) {
		pte_update(mm, addr, pte, 0, 0, 0);
		addr += PAGE_SIZE;
		++pte;
	}
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(pte - 1, ptl);
}

/*
 * Clear the subpage protection map for an address range, allowing
 * all accesses that are allowed by the pte permissions.
 */
static void subpage_prot_clear(unsigned long addr, unsigned long len)
{
	struct mm_struct *mm = current->mm;
	struct subpage_prot_table *spt = &mm->context.spt;
	u32 **spm, *spp;
	unsigned long i;
	size_t nw;
	unsigned long next, limit;

	down_write(&mm->mmap_sem);
	limit = addr + len;
	if (limit > spt->maxaddr)
		limit = spt->maxaddr;
	for (; addr < limit; addr = next) {
		next = pmd_addr_end(addr, limit);
		if (addr < 0x100000000UL) {
			spm = spt->low_prot;
		} else {
			spm = spt->protptrs[addr >> SBP_L3_SHIFT];
			if (!spm)
				continue;
		}
		spp = spm[(addr >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1)];
		if (!spp)
			continue;
		spp += (addr >> PAGE_SHIFT) & (SBP_L1_COUNT - 1);

		i = (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
		nw = PTRS_PER_PTE - i;
		if (addr + (nw << PAGE_SHIFT) > next)
			nw = (next - addr) >> PAGE_SHIFT;

		memset(spp, 0, nw * sizeof(u32));

		/* now flush any existing HPTEs for the range */
		hpte_flush_range(mm, addr, nw);
	}
	up_write(&mm->mmap_sem);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static int subpage_walk_pmd_entry(pmd_t *pmd, unsigned long addr,
				  unsigned long end, struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	split_huge_pmd(vma, pmd, addr);
	return 0;
}

static void subpage_mark_vma_nohuge(struct mm_struct *mm, unsigned long addr,
				    unsigned long len)
{
	struct vm_area_struct *vma;
	struct mm_walk subpage_proto_walk = {
		.mm = mm,
		.pmd_entry = subpage_walk_pmd_entry,
	};

	/*
	 * We don't try too hard, we just mark all the vma in that range
	 * VM_NOHUGEPAGE and split them.
	 */
	vma = find_vma(mm, addr);
	/*
	 * If the range is in unmapped range, just return
	 */
	if (vma && ((addr + len) <= vma->vm_start))
		return;

	while (vma) {
		if (vma->vm_start >= (addr + len))
			break;
		vma->vm_flags |= VM_NOHUGEPAGE;
		walk_page_vma(vma, &subpage_proto_walk);
		vma = vma->vm_next;
	}
}
#else
static void subpage_mark_vma_nohuge(struct mm_struct *mm, unsigned long addr,
				    unsigned long len)
{
	return;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * Copy in a subpage protection map for an address range.
 * The map has 2 bits per 4k subpage, so 32 bits per 64k page.
 * Each 2-bit field is 0 to allow any access, 1 to prevent writes,
 * 2 or 3 to prevent all accesses.
 * Note that the normal page protections also apply; the subpage
 * protection mechanism is an additional constraint, so putting 0
 * in a 2-bit field won't allow writes to a page that is otherwise
 * write-protected.
 */
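
/*
 * Illustrative userspace usage (a sketch under assumptions, not part of
 * this file): with 64k pages, protecting one 64k page at a 64k-aligned
 * address addr needs one u32 of map data.  Assuming the syscall is wired
 * up as __NR_subpage_prot, something like the following would make every
 * 4k subpage of that page read-only (each 2-bit field set to 1):
 *
 *	#include <stdint.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *
 *	uint32_t map = 0x55555555;	// 2-bit field = 1 for all 16 subpages
 *	syscall(__NR_subpage_prot, addr, 0x10000UL, &map);
 *
 * Passing a NULL map instead clears any existing subpage protection for
 * the range, as handled near the top of sys_subpage_prot() below.
 */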
long sys_subpage_prot(unsigned long addr, unsigned long len, u32 __user *map)
{
	struct mm_struct *mm = current->mm;
	struct subpage_prot_table *spt = &mm->context.spt;
	u32 **spm, *spp;
	unsigned long i;
	size_t nw;
	unsigned long next, limit;
	int err;

	/* Check parameters */
	if ((addr & ~PAGE_MASK) || (len & ~PAGE_MASK) ||
	    addr >= TASK_SIZE || len >= TASK_SIZE || addr + len > TASK_SIZE)
		return -EINVAL;

	if (is_hugepage_only_range(mm, addr, len))
		return -EINVAL;

	if (!map) {
		/* Clear out the protection map for the address range */
		subpage_prot_clear(addr, len);
		return 0;
	}

	if (!access_ok(VERIFY_READ, map, (len >> PAGE_SHIFT) * sizeof(u32)))
		return -EFAULT;

	down_write(&mm->mmap_sem);
	subpage_mark_vma_nohuge(mm, addr, len);
	for (limit = addr + len; addr < limit; addr = next) {
		next = pmd_addr_end(addr, limit);
		err = -ENOMEM;
		if (addr < 0x100000000UL) {
			spm = spt->low_prot;
		} else {
			spm = spt->protptrs[addr >> SBP_L3_SHIFT];
			if (!spm) {
				spm = (u32 **)get_zeroed_page(GFP_KERNEL);
				if (!spm)
					goto out;
				spt->protptrs[addr >> SBP_L3_SHIFT] = spm;
			}
		}
		spm += (addr >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1);
		spp = *spm;
		if (!spp) {
			spp = (u32 *)get_zeroed_page(GFP_KERNEL);
			if (!spp)
				goto out;
			*spm = spp;
		}
		spp += (addr >> PAGE_SHIFT) & (SBP_L1_COUNT - 1);

		/* demote the segment containing addr to 4k hash pages */
		local_irq_disable();
		demote_segment_4k(mm, addr);
		local_irq_enable();

		i = (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
		nw = PTRS_PER_PTE - i;
		if (addr + (nw << PAGE_SHIFT) > next)
			nw = (next - addr) >> PAGE_SHIFT;

		/* drop mmap_sem while copying from userspace, which may fault */
		up_write(&mm->mmap_sem);
		err = -EFAULT;
		if (__copy_from_user(spp, map, nw * sizeof(u32)))
			goto out2;
		map += nw;
		down_write(&mm->mmap_sem);

		/* now flush any existing HPTEs for the range */
		hpte_flush_range(mm, addr, nw);
	}
	if (limit > spt->maxaddr)
		spt->maxaddr = limit;
	err = 0;
 out:
	up_write(&mm->mmap_sem);
 out2:
	return err;
}