arch/s390/mm/pgtable.c
/*
 *  Copyright IBM Corp. 2007,2009
 *  Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/quicklist.h>
#include <linux/rcupdate.h>

#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
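
/*
 * Per-cpu batch used to free page tables and crst tables after an RCU
 * grace period: page table pointers are stacked from the bottom of
 * table[] (pgt_index counts up), crst table pointers from the top
 * (crst_index counts down).  When the two indices meet, the batch is
 * handed to call_rcu() and the tables are released in the callback.
 */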

struct rcu_table_freelist {
        struct rcu_head rcu;
        struct mm_struct *mm;
        unsigned int pgt_index;
        unsigned int crst_index;
        unsigned long *table[0];
};

#define RCU_FREELIST_SIZE \
        ((PAGE_SIZE - sizeof(struct rcu_table_freelist)) \
          / sizeof(unsigned long))

static DEFINE_PER_CPU(struct rcu_table_freelist *, rcu_table_freelist);

static void __page_table_free(struct mm_struct *mm, unsigned long *table);

static struct rcu_table_freelist *rcu_table_freelist_get(struct mm_struct *mm)
{
        struct rcu_table_freelist **batchp = &__get_cpu_var(rcu_table_freelist);
        struct rcu_table_freelist *batch = *batchp;

        if (batch)
                return batch;
        batch = (struct rcu_table_freelist *) __get_free_page(GFP_ATOMIC);
        if (batch) {
                batch->mm = mm;
                batch->pgt_index = 0;
                batch->crst_index = RCU_FREELIST_SIZE;
                *batchp = batch;
        }
        return batch;
}

static void rcu_table_freelist_callback(struct rcu_head *head)
{
        struct rcu_table_freelist *batch =
                container_of(head, struct rcu_table_freelist, rcu);

        while (batch->pgt_index > 0)
                __page_table_free(batch->mm, batch->table[--batch->pgt_index]);
        while (batch->crst_index < RCU_FREELIST_SIZE)
                crst_table_free(batch->mm, batch->table[batch->crst_index++]);
        free_page((unsigned long) batch);
}

void rcu_table_freelist_finish(void)
{
        struct rcu_table_freelist **batchp = &get_cpu_var(rcu_table_freelist);
        struct rcu_table_freelist *batch = *batchp;

        if (!batch)
                goto out;
        call_rcu(&batch->rcu, rcu_table_freelist_callback);
        *batchp = NULL;
out:
        put_cpu_var(rcu_table_freelist);
}

static void smp_sync(void *arg)
{
}
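
/*
 * A page table holds 256 entries.  On 31-bit kernels an entry is 4 bytes,
 * so four 1K tables fit into a 4K page; on 64-bit kernels an entry is
 * 8 bytes, so two 2K tables fit.  FRAG_MASK covers the page->flags bits
 * that mark which fragments of a page are in use; a table with pgstes
 * occupies two fragments, the second half holding the pgstes.
 */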

#ifndef CONFIG_64BIT
#define ALLOC_ORDER     1
#define TABLES_PER_PAGE 4
#define FRAG_MASK       15UL
#define SECOND_HALVES   10UL

void clear_table_pgstes(unsigned long *table)
{
        clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/4);
        memset(table + 256, 0, PAGE_SIZE/4);
        clear_table(table + 512, _PAGE_TYPE_EMPTY, PAGE_SIZE/4);
        memset(table + 768, 0, PAGE_SIZE/4);
}

#else
#define ALLOC_ORDER     2
#define TABLES_PER_PAGE 2
#define FRAG_MASK       3UL
#define SECOND_HALVES   2UL

void clear_table_pgstes(unsigned long *table)
{
        clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/2);
        memset(table + 256, 0, PAGE_SIZE/2);
}

#endif

unsigned long VMALLOC_START = VMALLOC_END - VMALLOC_SIZE;
EXPORT_SYMBOL(VMALLOC_START);

static int __init parse_vmalloc(char *arg)
{
        if (!arg)
                return -EINVAL;
        VMALLOC_START = (VMALLOC_END - memparse(arg, &arg)) & PAGE_MASK;
        return 0;
}
early_param("vmalloc", parse_vmalloc);

unsigned long *crst_table_alloc(struct mm_struct *mm)
{
        struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);

        if (!page)
                return NULL;
        return (unsigned long *) page_to_phys(page);
}

void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
        free_pages((unsigned long) table, ALLOC_ORDER);
}

void crst_table_free_rcu(struct mm_struct *mm, unsigned long *table)
{
        struct rcu_table_freelist *batch;

        preempt_disable();
        if (atomic_read(&mm->mm_users) < 2 &&
            cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
                crst_table_free(mm, table);
                goto out;
        }
        batch = rcu_table_freelist_get(mm);
        if (!batch) {
                smp_call_function(smp_sync, NULL, 1);
                crst_table_free(mm, table);
                goto out;
        }
        batch->table[--batch->crst_index] = table;
        if (batch->pgt_index >= batch->crst_index)
                rcu_table_freelist_finish();
out:
        preempt_enable();
}
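
/*
 * Grow or shrink the address space limit of an mm by switching the top
 * level between a segment table (2GB), a region-third table (4TB) and a
 * region-second table (8PB).  crst_table_upgrade allocates a new top-level
 * table, links the old top level below it and reloads the ASCE;
 * crst_table_downgrade walks back down and frees the unused upper levels.
 */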

#ifdef CONFIG_64BIT
int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
{
        unsigned long *table, *pgd;
        unsigned long entry;

        BUG_ON(limit > (1UL << 53));
repeat:
        table = crst_table_alloc(mm);
        if (!table)
                return -ENOMEM;
        spin_lock_bh(&mm->page_table_lock);
        if (mm->context.asce_limit < limit) {
                pgd = (unsigned long *) mm->pgd;
                if (mm->context.asce_limit <= (1UL << 31)) {
                        entry = _REGION3_ENTRY_EMPTY;
                        mm->context.asce_limit = 1UL << 42;
                        mm->context.asce_bits = _ASCE_TABLE_LENGTH |
                                                _ASCE_USER_BITS |
                                                _ASCE_TYPE_REGION3;
                } else {
                        entry = _REGION2_ENTRY_EMPTY;
                        mm->context.asce_limit = 1UL << 53;
                        mm->context.asce_bits = _ASCE_TABLE_LENGTH |
                                                _ASCE_USER_BITS |
                                                _ASCE_TYPE_REGION2;
                }
                crst_table_init(table, entry);
                pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
                mm->pgd = (pgd_t *) table;
                mm->task_size = mm->context.asce_limit;
                table = NULL;
        }
        spin_unlock_bh(&mm->page_table_lock);
        if (table)
                crst_table_free(mm, table);
        if (mm->context.asce_limit < limit)
                goto repeat;
        update_mm(mm, current);
        return 0;
}

void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
{
        pgd_t *pgd;

        if (mm->context.asce_limit <= limit)
                return;
        __tlb_flush_mm(mm);
        while (mm->context.asce_limit > limit) {
                pgd = mm->pgd;
                switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
                case _REGION_ENTRY_TYPE_R2:
                        mm->context.asce_limit = 1UL << 42;
                        mm->context.asce_bits = _ASCE_TABLE_LENGTH |
                                                _ASCE_USER_BITS |
                                                _ASCE_TYPE_REGION3;
                        break;
                case _REGION_ENTRY_TYPE_R3:
                        mm->context.asce_limit = 1UL << 31;
                        mm->context.asce_bits = _ASCE_TABLE_LENGTH |
                                                _ASCE_USER_BITS |
                                                _ASCE_TYPE_SEGMENT;
                        break;
                default:
                        BUG();
                }
                mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
                mm->task_size = mm->context.asce_limit;
                crst_table_free(mm, (unsigned long *) pgd);
        }
        update_mm(mm, current);
}
#endif

/*
 * page table entry allocation/free routines.
 */
unsigned long *page_table_alloc(struct mm_struct *mm)
{
        struct page *page;
        unsigned long *table;
        unsigned long bits;

        bits = (mm->context.has_pgste) ? 3UL : 1UL;
        spin_lock_bh(&mm->context.list_lock);
        page = NULL;
        if (!list_empty(&mm->context.pgtable_list)) {
                page = list_first_entry(&mm->context.pgtable_list,
                                        struct page, lru);
                if ((page->flags & FRAG_MASK) == ((1UL << TABLES_PER_PAGE) - 1))
                        page = NULL;
        }
        if (!page) {
                spin_unlock_bh(&mm->context.list_lock);
                page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
                if (!page)
                        return NULL;
                pgtable_page_ctor(page);
                page->flags &= ~FRAG_MASK;
                table = (unsigned long *) page_to_phys(page);
                if (mm->context.has_pgste)
                        clear_table_pgstes(table);
                else
                        clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
                spin_lock_bh(&mm->context.list_lock);
                list_add(&page->lru, &mm->context.pgtable_list);
        }
        table = (unsigned long *) page_to_phys(page);
        while (page->flags & bits) {
                table += 256;
                bits <<= 1;
        }
        page->flags |= bits;
        if ((page->flags & FRAG_MASK) == ((1UL << TABLES_PER_PAGE) - 1))
                list_move_tail(&page->lru, &mm->context.pgtable_list);
        spin_unlock_bh(&mm->context.list_lock);
        return table;
}

static void __page_table_free(struct mm_struct *mm, unsigned long *table)
{
        struct page *page;
        unsigned long bits;

        bits = ((unsigned long) table) & 15;
        table = (unsigned long *)(((unsigned long) table) ^ bits);
        page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
        page->flags ^= bits;
        if (!(page->flags & FRAG_MASK)) {
                pgtable_page_dtor(page);
                __free_page(page);
        }
}

void page_table_free(struct mm_struct *mm, unsigned long *table)
{
        struct page *page;
        unsigned long bits;

        bits = (mm->context.has_pgste) ? 3UL : 1UL;
        bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long);
        page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
        spin_lock_bh(&mm->context.list_lock);
        page->flags ^= bits;
        if (page->flags & FRAG_MASK) {
                /* Page now has some free pgtable fragments. */
                if (!list_empty(&page->lru))
                        list_move(&page->lru, &mm->context.pgtable_list);
                page = NULL;
        } else
                /* All fragments of the 4K page have been freed. */
                list_del(&page->lru);
        spin_unlock_bh(&mm->context.list_lock);
        if (page) {
                pgtable_page_dtor(page);
                __free_page(page);
        }
}
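
/*
 * Free a page table fragment with delayed (RCU) freeing.  If the mm has a
 * single user and is attached only to the local cpu the fragment is freed
 * immediately; otherwise it is queued on the per-cpu RCU batch so that it
 * is not reused before a grace period has elapsed.  If no batch can be
 * allocated, an smp_call_function() round trip synchronizes with the other
 * cpus before the fragment is freed directly.
 */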

void page_table_free_rcu(struct mm_struct *mm, unsigned long *table)
{
        struct rcu_table_freelist *batch;
        struct page *page;
        unsigned long bits;

        preempt_disable();
        if (atomic_read(&mm->mm_users) < 2 &&
            cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
                page_table_free(mm, table);
                goto out;
        }
        batch = rcu_table_freelist_get(mm);
        if (!batch) {
                smp_call_function(smp_sync, NULL, 1);
                page_table_free(mm, table);
                goto out;
        }
        bits = (mm->context.has_pgste) ? 3UL : 1UL;
        bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long);
        page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
        spin_lock_bh(&mm->context.list_lock);
        /* Delayed freeing with rcu prevents reuse of pgtable fragments */
        list_del_init(&page->lru);
        spin_unlock_bh(&mm->context.list_lock);
        table = (unsigned long *)(((unsigned long) table) | bits);
        batch->table[batch->pgt_index++] = table;
        if (batch->pgt_index >= batch->crst_index)
                rcu_table_freelist_finish();
out:
        preempt_enable();
}

/*
 * switch on pgstes for the current userspace process (for kvm)
 */
int s390_enable_sie(void)
{
        struct task_struct *tsk = current;
        struct mm_struct *mm, *old_mm;

        /* Do we have a switched amode? If not, we cannot do sie */
        if (user_mode == HOME_SPACE_MODE)
                return -EINVAL;

        /* Do we have pgstes? If yes, we are done */
        if (tsk->mm->context.has_pgste)
                return 0;

        /* let's check if we are allowed to replace the mm */
        task_lock(tsk);
        if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
#ifdef CONFIG_AIO
            !hlist_empty(&tsk->mm->ioctx_list) ||
#endif
            tsk->mm != tsk->active_mm) {
                task_unlock(tsk);
                return -EINVAL;
        }
        task_unlock(tsk);

        /* we copy the mm and let dup_mm create the page tables with pgstes */
        tsk->mm->context.alloc_pgste = 1;
        mm = dup_mm(tsk);
        tsk->mm->context.alloc_pgste = 0;
        if (!mm)
                return -ENOMEM;

        /* Now let's check again if something happened */
        task_lock(tsk);
        if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
#ifdef CONFIG_AIO
            !hlist_empty(&tsk->mm->ioctx_list) ||
#endif
            tsk->mm != tsk->active_mm) {
                mmput(mm);
                task_unlock(tsk);
                return -EINVAL;
        }

        /* ok, we are alone. No ptrace, no threads, etc. */
        old_mm = tsk->mm;
        tsk->mm = tsk->active_mm = mm;
        preempt_disable();
        update_mm(mm, tsk);
        atomic_inc(&mm->context.attach_count);
        atomic_dec(&old_mm->context.attach_count);
        cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
        preempt_enable();
        task_unlock(tsk);
        mmput(old_mm);
        return 0;
}
EXPORT_SYMBOL_GPL(s390_enable_sie);
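
/*
 * Check whether a kernel page is currently mapped, for hibernation with
 * CONFIG_DEBUG_PAGEALLOC: LOAD REAL ADDRESS sets condition code 0 only if
 * a translation for the address exists, so cc == 0 means the page is
 * present.
 */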

#if defined(CONFIG_DEBUG_PAGEALLOC) && defined(CONFIG_HIBERNATION)
bool kernel_page_present(struct page *page)
{
        unsigned long addr;
        int cc;

        addr = page_to_phys(page);
        asm volatile(
                " lra %1,0(%1)\n"
                " ipm %0\n"
                " srl %0,28"
                : "=d" (cc), "+a" (addr) : : "cc");
        return cc == 0;
}
#endif /* CONFIG_HIBERNATION && CONFIG_DEBUG_PAGEALLOC */