arch/powerpc/mm/subpage-prot.c

/*
 * Copyright 2007-2008 Paul Mackerras, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>

#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
/*
 * Free all pages allocated for subpage protection maps and pointers.
 * Also makes sure that the subpage_prot_table structure is
 * reinitialized for the next user.
 */
void subpage_prot_free(struct mm_struct *mm)
{
	struct subpage_prot_table *spt = &mm->context.spt;
	unsigned long i, j, addr;
	u32 **p;

	for (i = 0; i < 4; ++i) {
		if (spt->low_prot[i]) {
			free_page((unsigned long)spt->low_prot[i]);
			spt->low_prot[i] = NULL;
		}
	}
	addr = 0;
	for (i = 0; i < 2; ++i) {
		p = spt->protptrs[i];
		if (!p)
			continue;
		spt->protptrs[i] = NULL;
		for (j = 0; j < SBP_L2_COUNT && addr < spt->maxaddr;
		     ++j, addr += PAGE_SIZE)
			if (p[j])
				free_page((unsigned long)p[j]);
		free_page((unsigned long)p);
	}
	spt->maxaddr = 0;
}
void subpage_prot_init_new_context(struct mm_struct *mm)
{
	struct subpage_prot_table *spt = &mm->context.spt;

	memset(spt, 0, sizeof(*spt));
}
static void hpte_flush_range(struct mm_struct *mm, unsigned long addr,
			     int npages)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;

	pgd = pgd_offset(mm, addr);
	if (pgd_none(*pgd))
		return;
	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return;
	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	arch_enter_lazy_mmu_mode();
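	/*
	 * A zero clear-mask leaves the Linux PTE bits unchanged, but
	 * pte_update() still notices _PAGE_HASHPTE and invalidates any
	 * stale hash-table entry (HPTE) for the page.
	 */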
	for (; npages > 0; --npages) {
		pte_update(mm, addr, pte, 0, 0);
		addr += PAGE_SIZE;
		++pte;
	}
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(pte - 1, ptl);
}
/*
 * Clear the subpage protection map for an address range, allowing
 * all accesses that are allowed by the pte permissions.
 */
static void subpage_prot_clear(unsigned long addr, unsigned long len)
{
	struct mm_struct *mm = current->mm;
	struct subpage_prot_table *spt = &mm->context.spt;
	u32 **spm, *spp;
	int i, nw;
	unsigned long next, limit;

	down_write(&mm->mmap_sem);
	limit = addr + len;
	if (limit > spt->maxaddr)
		limit = spt->maxaddr;
	for (; addr < limit; addr = next) {
		next = pmd_addr_end(addr, limit);
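		/*
		 * Addresses below 4GB use the fixed low_prot array;
		 * higher addresses go through the two-level protptrs
		 * tree, indexed by SBP_L3_SHIFT and SBP_L2_SHIFT.
		 */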
		if (addr < 0x100000000) {
			spm = spt->low_prot;
		} else {
			spm = spt->protptrs[addr >> SBP_L3_SHIFT];
			if (!spm)
				continue;
		}
		spp = spm[(addr >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1)];
		if (!spp)
			continue;
		spp += (addr >> PAGE_SHIFT) & (SBP_L1_COUNT - 1);

		i = (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
		nw = PTRS_PER_PTE - i;
		if (addr + (nw << PAGE_SHIFT) > next)
			nw = (next - addr) >> PAGE_SHIFT;

		memset(spp, 0, nw * sizeof(u32));

		/* now flush any existing HPTEs for the range */
		hpte_flush_range(mm, addr, nw);
	}
	up_write(&mm->mmap_sem);
}
/*
 * Copy in a subpage protection map for an address range.
 * The map has 2 bits per 4k subpage, so 32 bits per 64k page.
 * Each 2-bit field is 0 to allow any access, 1 to prevent writes,
 * 2 or 3 to prevent all accesses.
 * Note that the normal page protections also apply; the subpage
 * protection mechanism is an additional constraint, so putting 0
 * in a 2-bit field won't allow writes to a page that is otherwise
 * write-protected.
 */
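/*
 * A minimal userspace sketch of the interface described above
 * (illustrative only, not part of this file).  It assumes a ppc64
 * system with a 64k base page size whose headers define
 * __NR_subpage_prot:
 *
 *	#include <stdint.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	// Write-protect all sixteen 4k subpages of one 64k page:
 *	// 0x55555555 puts 01 ("prevent writes") in each 2-bit field.
 *	int wrprotect_subpages(void *page)	// page: 64k-aligned
 *	{
 *		uint32_t map = 0x55555555;
 *		return syscall(__NR_subpage_prot,
 *			       (unsigned long)page, 0x10000UL, &map);
 *	}
 *
 * Passing a NULL map instead clears the protections for the range,
 * as subpage_prot_clear() above implements.
 */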
long sys_subpage_prot(unsigned long addr, unsigned long len, u32 __user *map)
{
	struct mm_struct *mm = current->mm;
	struct subpage_prot_table *spt = &mm->context.spt;
	u32 **spm, *spp;
	int i, nw;
	unsigned long next, limit;
	int err;

	/* Check parameters */
	if ((addr & ~PAGE_MASK) || (len & ~PAGE_MASK) ||
	    addr >= TASK_SIZE || len >= TASK_SIZE || addr + len > TASK_SIZE)
		return -EINVAL;

	if (is_hugepage_only_range(mm, addr, len))
		return -EINVAL;

	if (!map) {
		/* Clear out the protection map for the address range */
		subpage_prot_clear(addr, len);
		return 0;
	}

	if (!access_ok(VERIFY_READ, map, (len >> PAGE_SHIFT) * sizeof(u32)))
		return -EFAULT;
	down_write(&mm->mmap_sem);
	for (limit = addr + len; addr < limit; addr = next) {
		next = pmd_addr_end(addr, limit);
		err = -ENOMEM;
		if (addr < 0x100000000) {
			spm = spt->low_prot;
		} else {
			spm = spt->protptrs[addr >> SBP_L3_SHIFT];
			if (!spm) {
				spm = (u32 **)get_zeroed_page(GFP_KERNEL);
				if (!spm)
					goto out;
				spt->protptrs[addr >> SBP_L3_SHIFT] = spm;
			}
		}
		spm += (addr >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1);
		spp = *spm;
		if (!spp) {
			spp = (u32 *)get_zeroed_page(GFP_KERNEL);
			if (!spp)
				goto out;
			*spm = spp;
		}
		spp += (addr >> PAGE_SHIFT) & (SBP_L1_COUNT - 1);
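		/*
		 * The segment has to be demoted to 4k hash pages so
		 * that each 4k subpage can get its own HPTE carrying
		 * its own permissions.
		 */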
		local_irq_disable();
		demote_segment_4k(mm, addr);
		local_irq_enable();

		i = (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
		nw = PTRS_PER_PTE - i;
		if (addr + (nw << PAGE_SHIFT) > next)
			nw = (next - addr) >> PAGE_SHIFT;
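		/*
		 * Drop mmap_sem around the copy: a fault on the user
		 * buffer would take mmap_sem for read in do_page_fault()
		 * and deadlock against our write hold.
		 */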
		up_write(&mm->mmap_sem);
		err = -EFAULT;
		if (__copy_from_user(spp, map, nw * sizeof(u32)))
			goto out2;
		map += nw;
		down_write(&mm->mmap_sem);

		/* now flush any existing HPTEs for the range */
		hpte_flush_range(mm, addr, nw);
	}
	if (limit > spt->maxaddr)
		spt->maxaddr = limit;
	err = 0;
out:
	up_write(&mm->mmap_sem);
out2:
	return err;
}