[linux/fpc-iii.git] / arch/s390/mm/pgtable.c
blob 5664be4a3680566ff5e7754e9cbb6e8085eae544
/*
 *  Copyright IBM Corp. 2007, 2011
 *  Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/quicklist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#ifndef CONFIG_64BIT
#define ALLOC_ORDER	1
#define FRAG_MASK	0x0f
#else
#define ALLOC_ORDER	2
#define FRAG_MASK	0x03
#endif
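/*
 * A full region/segment (crst) table spans 1 << ALLOC_ORDER pages. FRAG_MASK
 * has one bit for each pte-table fragment that fits into a single 4K page
 * (four 1K fragments on 31 bit, two 2K fragments on 64 bit); the allocation
 * state of these fragments is tracked in page->_mapcount below.
 */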
unsigned long *crst_table_alloc(struct mm_struct *mm)
{
	struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);

	if (!page)
		return NULL;
	return (unsigned long *) page_to_phys(page);
}

void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
	free_pages((unsigned long) table, ALLOC_ORDER);
}
#ifdef CONFIG_64BIT
int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
{
	unsigned long *table, *pgd;
	unsigned long entry;

	BUG_ON(limit > (1UL << 53));
repeat:
	table = crst_table_alloc(mm);
	if (!table)
		return -ENOMEM;
	spin_lock_bh(&mm->page_table_lock);
	if (mm->context.asce_limit < limit) {
		pgd = (unsigned long *) mm->pgd;
		if (mm->context.asce_limit <= (1UL << 31)) {
			entry = _REGION3_ENTRY_EMPTY;
			mm->context.asce_limit = 1UL << 42;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION3;
		} else {
			entry = _REGION2_ENTRY_EMPTY;
			mm->context.asce_limit = 1UL << 53;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION2;
		}
		crst_table_init(table, entry);
		pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
		mm->pgd = (pgd_t *) table;
		mm->task_size = mm->context.asce_limit;
		table = NULL;
	}
	spin_unlock_bh(&mm->page_table_lock);
	if (table)
		crst_table_free(mm, table);
	if (mm->context.asce_limit < limit)
		goto repeat;
	return 0;
}

void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
{
	pgd_t *pgd;

	while (mm->context.asce_limit > limit) {
		pgd = mm->pgd;
		switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
		case _REGION_ENTRY_TYPE_R2:
			mm->context.asce_limit = 1UL << 42;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION3;
			break;
		case _REGION_ENTRY_TYPE_R3:
			mm->context.asce_limit = 1UL << 31;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_SEGMENT;
			break;
		default:
			BUG();
		}
		mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
		mm->task_size = mm->context.asce_limit;
		crst_table_free(mm, (unsigned long *) pgd);
	}
}
#endif
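/*
 * With CONFIG_PGSTE each page table page carries page status table entries
 * (pgstes) used by KVM. The gmap code below maintains a separate set of
 * region/segment tables that map guest absolute addresses onto the page
 * tables of the host user space process (see gmap_connect_pgtable).
 */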
#ifdef CONFIG_PGSTE

/**
 * gmap_alloc - allocate a guest address space
 * @mm: pointer to the parent mm_struct
 *
 * Returns a guest address space structure.
 */
struct gmap *gmap_alloc(struct mm_struct *mm)
{
	struct gmap *gmap;
	struct page *page;
	unsigned long *table;

	gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL);
	if (!gmap)
		goto out;
	INIT_LIST_HEAD(&gmap->crst_list);
	gmap->mm = mm;
	page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
	if (!page)
		goto out_free;
	list_add(&page->lru, &gmap->crst_list);
	table = (unsigned long *) page_to_phys(page);
	crst_table_init(table, _REGION1_ENTRY_EMPTY);
	gmap->table = table;
	gmap->asce = _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH |
		     _ASCE_USER_BITS | __pa(table);
	list_add(&gmap->list, &mm->context.gmap_list);
	return gmap;

out_free:
	kfree(gmap);
out:
	return NULL;
}
EXPORT_SYMBOL_GPL(gmap_alloc);

static int gmap_unlink_segment(struct gmap *gmap, unsigned long *table)
{
	struct gmap_pgtable *mp;
	struct gmap_rmap *rmap;
	struct page *page;

	if (*table & _SEGMENT_ENTRY_INVALID)
		return 0;
	page = pfn_to_page(*table >> PAGE_SHIFT);
	mp = (struct gmap_pgtable *) page->index;
	list_for_each_entry(rmap, &mp->mapper, list) {
		if (rmap->entry != table)
			continue;
		list_del(&rmap->list);
		kfree(rmap);
		break;
	}
	*table = mp->vmaddr | _SEGMENT_ENTRY_INVALID | _SEGMENT_ENTRY_PROTECT;
	return 1;
}

static void gmap_flush_tlb(struct gmap *gmap)
{
	if (MACHINE_HAS_IDTE)
		__tlb_flush_idte((unsigned long) gmap->table |
				 _ASCE_TYPE_REGION1);
	else
		__tlb_flush_global();
}

/**
 * gmap_free - free a guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_free(struct gmap *gmap)
{
	struct page *page, *next;
	unsigned long *table;
	int i;

	/* Flush tlb. */
	if (MACHINE_HAS_IDTE)
		__tlb_flush_idte((unsigned long) gmap->table |
				 _ASCE_TYPE_REGION1);
	else
		__tlb_flush_global();

	/* Free all segment & region tables. */
	down_read(&gmap->mm->mmap_sem);
	spin_lock(&gmap->mm->page_table_lock);
	list_for_each_entry_safe(page, next, &gmap->crst_list, lru) {
		table = (unsigned long *) page_to_phys(page);
		if ((*table & _REGION_ENTRY_TYPE_MASK) == 0)
			/* Remove gmap rmap structures for segment table. */
			for (i = 0; i < PTRS_PER_PMD; i++, table++)
				gmap_unlink_segment(gmap, table);
		__free_pages(page, ALLOC_ORDER);
	}
	spin_unlock(&gmap->mm->page_table_lock);
	up_read(&gmap->mm->mmap_sem);
	list_del(&gmap->list);
	kfree(gmap);
}
EXPORT_SYMBOL_GPL(gmap_free);

/**
 * gmap_enable - switch primary space to the guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_enable(struct gmap *gmap)
{
	S390_lowcore.gmap = (unsigned long) gmap;
}
EXPORT_SYMBOL_GPL(gmap_enable);

/**
 * gmap_disable - switch back to the standard primary address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_disable(struct gmap *gmap)
{
	S390_lowcore.gmap = 0UL;
}
EXPORT_SYMBOL_GPL(gmap_disable);
/*
 * gmap_alloc_table is assumed to be called with mmap_sem held
 */
static int gmap_alloc_table(struct gmap *gmap,
			    unsigned long *table, unsigned long init)
	__releases(&gmap->mm->page_table_lock)
	__acquires(&gmap->mm->page_table_lock)
{
	struct page *page;
	unsigned long *new;

	/* since we don't free the gmap table until gmap_free we can unlock */
	spin_unlock(&gmap->mm->page_table_lock);
	page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
	spin_lock(&gmap->mm->page_table_lock);
	if (!page)
		return -ENOMEM;
	new = (unsigned long *) page_to_phys(page);
	crst_table_init(new, init);
	if (*table & _REGION_ENTRY_INVALID) {
		list_add(&page->lru, &gmap->crst_list);
		*table = (unsigned long) new | _REGION_ENTRY_LENGTH |
			(*table & _REGION_ENTRY_TYPE_MASK);
	} else
		__free_pages(page, ALLOC_ORDER);
	return 0;
}
/**
 * gmap_unmap_segment - unmap segment from the guest address space
 * @gmap: pointer to the guest address space structure
 * @to: address in the guest address space
 * @len: length of the memory area to unmap
 *
 * Returns 0 if the unmap succeeded, -EINVAL if not.
 */
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
{
	unsigned long *table;
	unsigned long off;
	int flush;

	if ((to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || to + len < to)
		return -EINVAL;

	flush = 0;
	down_read(&gmap->mm->mmap_sem);
	spin_lock(&gmap->mm->page_table_lock);
	for (off = 0; off < len; off += PMD_SIZE) {
		/* Walk the guest addr space page table */
		table = gmap->table + (((to + off) >> 53) & 0x7ff);
		if (*table & _REGION_ENTRY_INVALID)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 42) & 0x7ff);
		if (*table & _REGION_ENTRY_INVALID)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 31) & 0x7ff);
		if (*table & _REGION_ENTRY_INVALID)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 20) & 0x7ff);

		/* Clear segment table entry in guest address space. */
		flush |= gmap_unlink_segment(gmap, table);
		*table = _SEGMENT_ENTRY_INVALID;
	}
out:
	spin_unlock(&gmap->mm->page_table_lock);
	up_read(&gmap->mm->mmap_sem);
	if (flush)
		gmap_flush_tlb(gmap);
	return 0;
}
EXPORT_SYMBOL_GPL(gmap_unmap_segment);
/**
 * gmap_map_segment - map a segment to the guest address space
 * @gmap: pointer to the guest address space structure
 * @from: source address in the parent address space
 * @to: target address in the guest address space
 * @len: length of the memory area to map
 *
 * Returns 0 if the mmap succeeded, -EINVAL or -ENOMEM if not.
 */
int gmap_map_segment(struct gmap *gmap, unsigned long from,
		     unsigned long to, unsigned long len)
{
	unsigned long *table;
	unsigned long off;
	int flush;

	if ((from | to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || from + len > TASK_MAX_SIZE ||
	    from + len < from || to + len < to)
		return -EINVAL;

	flush = 0;
	down_read(&gmap->mm->mmap_sem);
	spin_lock(&gmap->mm->page_table_lock);
	for (off = 0; off < len; off += PMD_SIZE) {
		/* Walk the gmap address space page table */
		table = gmap->table + (((to + off) >> 53) & 0x7ff);
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY))
			goto out_unmap;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 42) & 0x7ff);
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY))
			goto out_unmap;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 31) & 0x7ff);
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY))
			goto out_unmap;
		table = (unsigned long *) (*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 20) & 0x7ff);

		/* Store 'from' address in an invalid segment table entry. */
		flush |= gmap_unlink_segment(gmap, table);
		*table = (from + off) | (_SEGMENT_ENTRY_INVALID |
					 _SEGMENT_ENTRY_PROTECT);
	}
	spin_unlock(&gmap->mm->page_table_lock);
	up_read(&gmap->mm->mmap_sem);
	if (flush)
		gmap_flush_tlb(gmap);
	return 0;

out_unmap:
	spin_unlock(&gmap->mm->page_table_lock);
	up_read(&gmap->mm->mmap_sem);
	gmap_unmap_segment(gmap, to, len);
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(gmap_map_segment);
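/*
 * Walk the gmap region and segment tables for a guest address. The shift
 * values select the index into the region-first (>> 53), region-second
 * (>> 42), region-third (>> 31) and segment (>> 20) tables, each of which
 * holds 2048 entries (hence the 0x7ff index mask). Returns a pointer to the
 * segment table entry, or -EFAULT if a higher level table is missing.
 */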
static unsigned long *gmap_table_walk(unsigned long address, struct gmap *gmap)
{
	unsigned long *table;

	table = gmap->table + ((address >> 53) & 0x7ff);
	if (unlikely(*table & _REGION_ENTRY_INVALID))
		return ERR_PTR(-EFAULT);
	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	table = table + ((address >> 42) & 0x7ff);
	if (unlikely(*table & _REGION_ENTRY_INVALID))
		return ERR_PTR(-EFAULT);
	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	table = table + ((address >> 31) & 0x7ff);
	if (unlikely(*table & _REGION_ENTRY_INVALID))
		return ERR_PTR(-EFAULT);
	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	table = table + ((address >> 20) & 0x7ff);
	return table;
}

/**
 * __gmap_translate - translate a guest address to a user space address
 * @address: guest address
 * @gmap: pointer to guest mapping meta data structure
 *
 * Returns user space address which corresponds to the guest address or
 * -EFAULT if no such mapping exists.
 * This function does not establish potentially missing page table entries.
 * The mmap_sem of the mm that belongs to the address space must be held
 * when this function gets called.
 */
unsigned long __gmap_translate(unsigned long address, struct gmap *gmap)
{
	unsigned long *segment_ptr, vmaddr, segment;
	struct gmap_pgtable *mp;
	struct page *page;

	current->thread.gmap_addr = address;
	segment_ptr = gmap_table_walk(address, gmap);
	if (IS_ERR(segment_ptr))
		return PTR_ERR(segment_ptr);
	/* Convert the gmap address to an mm address. */
	segment = *segment_ptr;
	if (!(segment & _SEGMENT_ENTRY_INVALID)) {
		page = pfn_to_page(segment >> PAGE_SHIFT);
		mp = (struct gmap_pgtable *) page->index;
		return mp->vmaddr | (address & ~PMD_MASK);
	} else if (segment & _SEGMENT_ENTRY_PROTECT) {
		vmaddr = segment & _SEGMENT_ENTRY_ORIGIN;
		return vmaddr | (address & ~PMD_MASK);
	}
	return -EFAULT;
}
EXPORT_SYMBOL_GPL(__gmap_translate);

/**
 * gmap_translate - translate a guest address to a user space address
 * @address: guest address
 * @gmap: pointer to guest mapping meta data structure
 *
 * Returns user space address which corresponds to the guest address or
 * -EFAULT if no such mapping exists.
 * This function does not establish potentially missing page table entries.
 */
unsigned long gmap_translate(unsigned long address, struct gmap *gmap)
{
	unsigned long rc;

	down_read(&gmap->mm->mmap_sem);
	rc = __gmap_translate(address, gmap);
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_translate);

static int gmap_connect_pgtable(unsigned long address, unsigned long segment,
				unsigned long *segment_ptr, struct gmap *gmap)
{
	unsigned long vmaddr;
	struct vm_area_struct *vma;
	struct gmap_pgtable *mp;
	struct gmap_rmap *rmap;
	struct mm_struct *mm;
	struct page *page;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	mm = gmap->mm;
	vmaddr = segment & _SEGMENT_ENTRY_ORIGIN;
	vma = find_vma(mm, vmaddr);
	if (!vma || vma->vm_start > vmaddr)
		return -EFAULT;
	/* Walk the parent mm page table */
	pgd = pgd_offset(mm, vmaddr);
	pud = pud_alloc(mm, pgd, vmaddr);
	if (!pud)
		return -ENOMEM;
	pmd = pmd_alloc(mm, pud, vmaddr);
	if (!pmd)
		return -ENOMEM;
	if (!pmd_present(*pmd) &&
	    __pte_alloc(mm, vma, pmd, vmaddr))
		return -ENOMEM;
	/* pmd now points to a valid segment table entry. */
	rmap = kmalloc(sizeof(*rmap), GFP_KERNEL|__GFP_REPEAT);
	if (!rmap)
		return -ENOMEM;
	/* Link gmap segment table entry location to page table. */
	page = pmd_page(*pmd);
	mp = (struct gmap_pgtable *) page->index;
	rmap->gmap = gmap;
	rmap->entry = segment_ptr;
	rmap->vmaddr = address & PMD_MASK;
	spin_lock(&mm->page_table_lock);
	if (*segment_ptr == segment) {
		list_add(&rmap->list, &mp->mapper);
		/* Set gmap segment table entry to page table. */
		*segment_ptr = pmd_val(*pmd) & PAGE_MASK;
		rmap = NULL;
	}
	spin_unlock(&mm->page_table_lock);
	kfree(rmap);
	return 0;
}

static void gmap_disconnect_pgtable(struct mm_struct *mm, unsigned long *table)
{
	struct gmap_rmap *rmap, *next;
	struct gmap_pgtable *mp;
	struct page *page;
	int flush;

	flush = 0;
	spin_lock(&mm->page_table_lock);
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	mp = (struct gmap_pgtable *) page->index;
	list_for_each_entry_safe(rmap, next, &mp->mapper, list) {
		*rmap->entry = mp->vmaddr | (_SEGMENT_ENTRY_INVALID |
					     _SEGMENT_ENTRY_PROTECT);
		list_del(&rmap->list);
		kfree(rmap);
		flush = 1;
	}
	spin_unlock(&mm->page_table_lock);
	if (flush)
		__tlb_flush_global();
}

/*
 * this function is assumed to be called with mmap_sem held
 */
unsigned long __gmap_fault(unsigned long address, struct gmap *gmap)
{
	unsigned long *segment_ptr, segment;
	struct gmap_pgtable *mp;
	struct page *page;
	int rc;

	current->thread.gmap_addr = address;
	segment_ptr = gmap_table_walk(address, gmap);
	if (IS_ERR(segment_ptr))
		return -EFAULT;
	/* Convert the gmap address to an mm address. */
	while (1) {
		segment = *segment_ptr;
		if (!(segment & _SEGMENT_ENTRY_INVALID)) {
			/* Page table is present */
			page = pfn_to_page(segment >> PAGE_SHIFT);
			mp = (struct gmap_pgtable *) page->index;
			return mp->vmaddr | (address & ~PMD_MASK);
		}
		if (!(segment & _SEGMENT_ENTRY_PROTECT))
			/* Nothing mapped in the gmap address space. */
			break;
		rc = gmap_connect_pgtable(address, segment, segment_ptr, gmap);
		if (rc)
			return rc;
	}
	return -EFAULT;
}

unsigned long gmap_fault(unsigned long address, struct gmap *gmap)
{
	unsigned long rc;

	down_read(&gmap->mm->mmap_sem);
	rc = __gmap_fault(address, gmap);
	up_read(&gmap->mm->mmap_sem);

	return rc;
}
EXPORT_SYMBOL_GPL(gmap_fault);

void gmap_discard(unsigned long from, unsigned long to, struct gmap *gmap)
{
	unsigned long *table, address, size;
	struct vm_area_struct *vma;
	struct gmap_pgtable *mp;
	struct page *page;

	down_read(&gmap->mm->mmap_sem);
	address = from;
	while (address < to) {
		/* Walk the gmap address space page table */
		table = gmap->table + ((address >> 53) & 0x7ff);
		if (unlikely(*table & _REGION_ENTRY_INVALID)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + ((address >> 42) & 0x7ff);
		if (unlikely(*table & _REGION_ENTRY_INVALID)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + ((address >> 31) & 0x7ff);
		if (unlikely(*table & _REGION_ENTRY_INVALID)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + ((address >> 20) & 0x7ff);
		if (unlikely(*table & _SEGMENT_ENTRY_INVALID)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}
		page = pfn_to_page(*table >> PAGE_SHIFT);
		mp = (struct gmap_pgtable *) page->index;
		vma = find_vma(gmap->mm, mp->vmaddr);
		size = min(to - address, PMD_SIZE - (address & ~PMD_MASK));
		zap_page_range(vma, mp->vmaddr | (address & ~PMD_MASK),
			       size, NULL);
		address = (address + PMD_SIZE) & PMD_MASK;
	}
	up_read(&gmap->mm->mmap_sem);
}
EXPORT_SYMBOL_GPL(gmap_discard);
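/*
 * The ipte notifier machinery lets a consumer (e.g. KVM) request a callback
 * when a pte in a gmap-mapped range is invalidated: gmap_ipte_notify() sets
 * PGSTE_IN_BIT in the pgste of each pte, and gmap_do_ipte_notify() is then
 * invoked from the pte invalidation paths to fan the event out to all
 * registered notifier blocks.
 */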
static LIST_HEAD(gmap_notifier_list);
static DEFINE_SPINLOCK(gmap_notifier_lock);

/**
 * gmap_register_ipte_notifier - register a pte invalidation callback
 * @nb: pointer to the gmap notifier block
 */
void gmap_register_ipte_notifier(struct gmap_notifier *nb)
{
	spin_lock(&gmap_notifier_lock);
	list_add(&nb->list, &gmap_notifier_list);
	spin_unlock(&gmap_notifier_lock);
}
EXPORT_SYMBOL_GPL(gmap_register_ipte_notifier);

/**
 * gmap_unregister_ipte_notifier - remove a pte invalidation callback
 * @nb: pointer to the gmap notifier block
 */
void gmap_unregister_ipte_notifier(struct gmap_notifier *nb)
{
	spin_lock(&gmap_notifier_lock);
	list_del_init(&nb->list);
	spin_unlock(&gmap_notifier_lock);
}
EXPORT_SYMBOL_GPL(gmap_unregister_ipte_notifier);
/**
 * gmap_ipte_notify - mark a range of ptes for invalidation notification
 * @gmap: pointer to guest mapping meta data structure
 * @start: virtual address in the guest address space
 * @len: size of area
 *
 * Returns 0 if for each page in the given range a gmap mapping exists and
 * the invalidation notification could be set. If the gmap mapping is missing
 * for one or more pages -EFAULT is returned. If no memory could be allocated
 * -ENOMEM is returned. This function establishes missing page table entries.
 */
int gmap_ipte_notify(struct gmap *gmap, unsigned long start, unsigned long len)
{
	unsigned long addr;
	spinlock_t *ptl;
	pte_t *ptep, entry;
	pgste_t pgste;
	int rc = 0;

	if ((start & ~PAGE_MASK) || (len & ~PAGE_MASK))
		return -EINVAL;
	down_read(&gmap->mm->mmap_sem);
	while (len) {
		/* Convert gmap address and connect the page tables */
		addr = __gmap_fault(start, gmap);
		if (IS_ERR_VALUE(addr)) {
			rc = addr;
			break;
		}
		/* Get the page mapped */
		if (fixup_user_fault(current, gmap->mm, addr, FAULT_FLAG_WRITE)) {
			rc = -EFAULT;
			break;
		}
		/* Walk the process page table, lock and get pte pointer */
		ptep = get_locked_pte(gmap->mm, addr, &ptl);
		if (unlikely(!ptep))
			continue;
		/* Set notification bit in the pgste of the pte */
		entry = *ptep;
		if ((pte_val(entry) & (_PAGE_INVALID | _PAGE_PROTECT)) == 0) {
			pgste = pgste_get_lock(ptep);
			pgste_val(pgste) |= PGSTE_IN_BIT;
			pgste_set_unlock(ptep, pgste);
			start += PAGE_SIZE;
			len -= PAGE_SIZE;
		}
		spin_unlock(ptl);
	}
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_ipte_notify);
/**
 * gmap_do_ipte_notify - call all invalidation callbacks for a specific pte.
 * @mm: pointer to the process mm_struct
 * @addr: virtual address in the process address space
 * @pte: pointer to the page table entry
 *
 * This function is assumed to be called with the page table lock held
 * for the pte to notify.
 */
void gmap_do_ipte_notify(struct mm_struct *mm, unsigned long addr, pte_t *pte)
{
	unsigned long segment_offset;
	struct gmap_notifier *nb;
	struct gmap_pgtable *mp;
	struct gmap_rmap *rmap;
	struct page *page;

	segment_offset = ((unsigned long) pte) & (255 * sizeof(pte_t));
	segment_offset = segment_offset * (4096 / sizeof(pte_t));
	page = pfn_to_page(__pa(pte) >> PAGE_SHIFT);
	mp = (struct gmap_pgtable *) page->index;
	spin_lock(&gmap_notifier_lock);
	list_for_each_entry(rmap, &mp->mapper, list) {
		list_for_each_entry(nb, &gmap_notifier_list, list)
			nb->notifier_call(rmap->gmap,
					  rmap->vmaddr + segment_offset);
	}
	spin_unlock(&gmap_notifier_lock);
}
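/*
 * Page table pages that carry pgstes use the full 4K page and have
 * page->_mapcount set to 0 in page_table_alloc_pgste(), while fragment
 * pages track their allocated fragments in the low _mapcount bits.
 * A _mapcount of 0 therefore identifies a pgste page table.
 */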
static inline int page_table_with_pgste(struct page *page)
{
	return atomic_read(&page->_mapcount) == 0;
}

static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
						    unsigned long vmaddr)
{
	struct page *page;
	unsigned long *table;
	struct gmap_pgtable *mp;

	page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
	if (!page)
		return NULL;
	mp = kmalloc(sizeof(*mp), GFP_KERNEL|__GFP_REPEAT);
	if (!mp) {
		__free_page(page);
		return NULL;
	}
	pgtable_page_ctor(page);
	mp->vmaddr = vmaddr & PMD_MASK;
	INIT_LIST_HEAD(&mp->mapper);
	page->index = (unsigned long) mp;
	atomic_set(&page->_mapcount, 0);
	table = (unsigned long *) page_to_phys(page);
	clear_table(table, _PAGE_INVALID, PAGE_SIZE/2);
	clear_table(table + PTRS_PER_PTE, PGSTE_HR_BIT | PGSTE_HC_BIT,
		    PAGE_SIZE/2);
	return table;
}

static inline void page_table_free_pgste(unsigned long *table)
{
	struct page *page;
	struct gmap_pgtable *mp;

	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	mp = (struct gmap_pgtable *) page->index;
	BUG_ON(!list_empty(&mp->mapper));
	pgtable_page_dtor(page);
	atomic_set(&page->_mapcount, -1);
	kfree(mp);
	__free_page(page);
}

int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned long key, bool nq)
{
	spinlock_t *ptl;
	pgste_t old, new;
	pte_t *ptep;

	down_read(&mm->mmap_sem);
retry:
	ptep = get_locked_pte(current->mm, addr, &ptl);
	if (unlikely(!ptep)) {
		up_read(&mm->mmap_sem);
		return -EFAULT;
	}
	if (!(pte_val(*ptep) & _PAGE_INVALID) &&
	     (pte_val(*ptep) & _PAGE_PROTECT)) {
		pte_unmap_unlock(*ptep, ptl);
		if (fixup_user_fault(current, mm, addr, FAULT_FLAG_WRITE)) {
			up_read(&mm->mmap_sem);
			return -EFAULT;
		}
		goto retry;
	}

	new = old = pgste_get_lock(ptep);
	pgste_val(new) &= ~(PGSTE_GR_BIT | PGSTE_GC_BIT |
			    PGSTE_ACC_BITS | PGSTE_FP_BIT);
	pgste_val(new) |= (key & (_PAGE_CHANGED | _PAGE_REFERENCED)) << 48;
	pgste_val(new) |= (key & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
		unsigned long address, bits, skey;

		address = pte_val(*ptep) & PAGE_MASK;
		skey = (unsigned long) page_get_storage_key(address);
		bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
		skey = key & (_PAGE_ACC_BITS | _PAGE_FP_BIT);
		/* Set storage key ACC and FP */
		page_set_storage_key(address, skey, !nq);
		/* Merge host changed & referenced into pgste */
		pgste_val(new) |= bits << 52;
	}
	/* changing the guest storage key is considered a change of the page */
	if ((pgste_val(new) ^ pgste_val(old)) &
	    (PGSTE_ACC_BITS | PGSTE_FP_BIT | PGSTE_GR_BIT | PGSTE_GC_BIT))
		pgste_val(new) |= PGSTE_HC_BIT;

	pgste_set_unlock(ptep, new);
	pte_unmap_unlock(*ptep, ptl);
	up_read(&mm->mmap_sem);
	return 0;
}
EXPORT_SYMBOL(set_guest_storage_key);

#else /* CONFIG_PGSTE */

static inline int page_table_with_pgste(struct page *page)
{
	return 0;
}

static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
						    unsigned long vmaddr)
{
	return NULL;
}

static inline void page_table_free_pgste(unsigned long *table)
{
}

static inline void gmap_disconnect_pgtable(struct mm_struct *mm,
					   unsigned long *table)
{
}

#endif /* CONFIG_PGSTE */
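/*
 * page->_mapcount of a fragment page uses the low FRAG_MASK bits to mark
 * fragments that are currently allocated and the next four bits to mark
 * fragments queued for RCU freeing (see page_table_free_rcu below).
 * atomic_xor_bits toggles those bits without taking a lock.
 */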
static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
{
	unsigned int old, new;

	do {
		old = atomic_read(v);
		new = old ^ bits;
	} while (atomic_cmpxchg(v, old, new) != old);
	return new;
}

/*
 * page table entry allocation/free routines.
 */
unsigned long *page_table_alloc(struct mm_struct *mm, unsigned long vmaddr)
{
	unsigned long *uninitialized_var(table);
	struct page *uninitialized_var(page);
	unsigned int mask, bit;

	if (mm_has_pgste(mm))
		return page_table_alloc_pgste(mm, vmaddr);
	/* Allocate fragments of a 4K page as 1K/2K page table */
	spin_lock_bh(&mm->context.list_lock);
	mask = FRAG_MASK;
	if (!list_empty(&mm->context.pgtable_list)) {
		page = list_first_entry(&mm->context.pgtable_list,
					struct page, lru);
		table = (unsigned long *) page_to_phys(page);
		mask = atomic_read(&page->_mapcount);
		mask = mask | (mask >> 4);
	}
	if ((mask & FRAG_MASK) == FRAG_MASK) {
		spin_unlock_bh(&mm->context.list_lock);
		page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
		if (!page)
			return NULL;
		pgtable_page_ctor(page);
		atomic_set(&page->_mapcount, 1);
		table = (unsigned long *) page_to_phys(page);
		clear_table(table, _PAGE_INVALID, PAGE_SIZE);
		spin_lock_bh(&mm->context.list_lock);
		list_add(&page->lru, &mm->context.pgtable_list);
	} else {
		for (bit = 1; mask & bit; bit <<= 1)
			table += PTRS_PER_PTE;
		mask = atomic_xor_bits(&page->_mapcount, bit);
		if ((mask & FRAG_MASK) == FRAG_MASK)
			list_del(&page->lru);
	}
	spin_unlock_bh(&mm->context.list_lock);
	return table;
}

void page_table_free(struct mm_struct *mm, unsigned long *table)
{
	struct page *page;
	unsigned int bit, mask;

	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (page_table_with_pgste(page)) {
		gmap_disconnect_pgtable(mm, table);
		return page_table_free_pgste(table);
	}
	/* Free 1K/2K page table fragment of a 4K page */
	bit = 1 << ((__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t)));
	spin_lock_bh(&mm->context.list_lock);
	if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
		list_del(&page->lru);
	mask = atomic_xor_bits(&page->_mapcount, bit);
	if (mask & FRAG_MASK)
		list_add(&page->lru, &mm->context.pgtable_list);
	spin_unlock_bh(&mm->context.list_lock);
	if (mask == 0) {
		pgtable_page_dtor(page);
		atomic_set(&page->_mapcount, -1);
		__free_page(page);
	}
}

static void __page_table_free_rcu(void *table, unsigned bit)
{
	struct page *page;

	if (bit == FRAG_MASK)
		return page_table_free_pgste(table);
	/* Free 1K/2K page table fragment of a 4K page */
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (atomic_xor_bits(&page->_mapcount, bit) == 0) {
		pgtable_page_dtor(page);
		atomic_set(&page->_mapcount, -1);
		__free_page(page);
	}
}

void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table)
{
	struct mm_struct *mm;
	struct page *page;
	unsigned int bit, mask;

	mm = tlb->mm;
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (page_table_with_pgste(page)) {
		gmap_disconnect_pgtable(mm, table);
		table = (unsigned long *) (__pa(table) | FRAG_MASK);
		tlb_remove_table(tlb, table);
		return;
	}
	bit = 1 << ((__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t)));
	spin_lock_bh(&mm->context.list_lock);
	if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
		list_del(&page->lru);
	mask = atomic_xor_bits(&page->_mapcount, bit | (bit << 4));
	if (mask & FRAG_MASK)
		list_add_tail(&page->lru, &mm->context.pgtable_list);
	spin_unlock_bh(&mm->context.list_lock);
	table = (unsigned long *) (__pa(table) | (bit << 4));
	tlb_remove_table(tlb, table);
}
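/*
 * The table pointer handed to tlb_remove_table() encodes in its low bits
 * what has to be freed once the grace period is over: 0 for a crst table,
 * FRAG_MASK for a full pgste page table, otherwise the fragment bit shifted
 * into the second nibble (see page_table_free_rcu above).
 */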
static void __tlb_remove_table(void *_table)
{
	const unsigned long mask = (FRAG_MASK << 4) | FRAG_MASK;
	void *table = (void *)((unsigned long) _table & ~mask);
	unsigned type = (unsigned long) _table & mask;

	if (type)
		__page_table_free_rcu(table, type);
	else
		free_pages((unsigned long) table, ALLOC_ORDER);
}

static void tlb_remove_table_smp_sync(void *arg)
{
	/* Simply deliver the interrupt */
}

static void tlb_remove_table_one(void *table)
{
	/*
	 * This isn't an RCU grace period and hence the page-tables cannot be
	 * assumed to be actually RCU-freed.
	 *
	 * It is however sufficient for software page-table walkers that rely
	 * on IRQ disabling. See the comment near struct mmu_table_batch.
	 */
	smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
	__tlb_remove_table(table);
}

static void tlb_remove_table_rcu(struct rcu_head *head)
{
	struct mmu_table_batch *batch;
	int i;

	batch = container_of(head, struct mmu_table_batch, rcu);

	for (i = 0; i < batch->nr; i++)
		__tlb_remove_table(batch->tables[i]);

	free_page((unsigned long)batch);
}

void tlb_table_flush(struct mmu_gather *tlb)
{
	struct mmu_table_batch **batch = &tlb->batch;

	if (*batch) {
		call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
		*batch = NULL;
	}
}

void tlb_remove_table(struct mmu_gather *tlb, void *table)
{
	struct mmu_table_batch **batch = &tlb->batch;

	tlb->mm->context.flush_mm = 1;
	if (*batch == NULL) {
		*batch = (struct mmu_table_batch *)
			__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
		if (*batch == NULL) {
			__tlb_flush_mm_lazy(tlb->mm);
			tlb_remove_table_one(table);
			return;
		}
		(*batch)->nr = 0;
	}
	(*batch)->tables[(*batch)->nr++] = table;
	if ((*batch)->nr == MAX_TABLE_BATCH)
		tlb_flush_mmu(tlb);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void thp_split_vma(struct vm_area_struct *vma)
{
	unsigned long addr;

	for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE)
		follow_page(vma, addr, FOLL_SPLIT);
}

static inline void thp_split_mm(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) {
		thp_split_vma(vma);
		vma->vm_flags &= ~VM_HUGEPAGE;
		vma->vm_flags |= VM_NOHUGEPAGE;
	}
	mm->def_flags |= VM_NOHUGEPAGE;
}
#else
static inline void thp_split_mm(struct mm_struct *mm)
{
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
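/*
 * s390_enable_sie() below re-allocates all page tables of a process with
 * pgste-capable tables: page_table_realloc() walks the address space,
 * allocates a new 4K pgste page table for every pmd that still points to a
 * short fragment table, copies the ptes over and frees the old fragment via
 * RCU so that concurrent software walkers stay safe.
 */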
static unsigned long page_table_realloc_pmd(struct mmu_gather *tlb,
				struct mm_struct *mm, pud_t *pud,
				unsigned long addr, unsigned long end)
{
	unsigned long next, *table, *new;
	struct page *page;
	pmd_t *pmd;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
again:
		if (pmd_none_or_clear_bad(pmd))
			continue;
		table = (unsigned long *) pmd_deref(*pmd);
		page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
		if (page_table_with_pgste(page))
			continue;
		/* Allocate new page table with pgstes */
		new = page_table_alloc_pgste(mm, addr);
		if (!new) {
			mm->context.has_pgste = 0;
			continue;
		}
		spin_lock(&mm->page_table_lock);
		if (likely((unsigned long *) pmd_deref(*pmd) == table)) {
			/* Nuke pmd entry pointing to the "short" page table */
			pmdp_flush_lazy(mm, addr, pmd);
			pmd_clear(pmd);
			/* Copy ptes from old table to new table */
			memcpy(new, table, PAGE_SIZE/2);
			clear_table(table, _PAGE_INVALID, PAGE_SIZE/2);
			/* Establish new table */
			pmd_populate(mm, pmd, (pte_t *) new);
			/* Free old table with rcu, there might be a walker! */
			page_table_free_rcu(tlb, table);
			new = NULL;
		}
		spin_unlock(&mm->page_table_lock);
		if (new) {
			page_table_free_pgste(new);
			goto again;
		}
	} while (pmd++, addr = next, addr != end);

	return addr;
}

static unsigned long page_table_realloc_pud(struct mmu_gather *tlb,
				   struct mm_struct *mm, pgd_t *pgd,
				   unsigned long addr, unsigned long end)
{
	unsigned long next;
	pud_t *pud;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		next = page_table_realloc_pmd(tlb, mm, pud, addr, next);
	} while (pud++, addr = next, addr != end);

	return addr;
}

static void page_table_realloc(struct mmu_gather *tlb, struct mm_struct *mm,
			       unsigned long addr, unsigned long end)
{
	unsigned long next;
	pgd_t *pgd;

	pgd = pgd_offset(mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		next = page_table_realloc_pud(tlb, mm, pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}
/*
 * Switch on pgstes for the current userspace process (needed by KVM).
 */
int s390_enable_sie(void)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	struct mmu_gather tlb;

	/* Do we have switched amode? If no, we cannot do sie */
	if (s390_user_mode == HOME_SPACE_MODE)
		return -EINVAL;

	/* Do we have pgstes? If yes, we are done */
	if (mm_has_pgste(tsk->mm))
		return 0;

	down_write(&mm->mmap_sem);
	/* split thp mappings and disable thp for future mappings */
	thp_split_mm(mm);
	/* Reallocate the page tables with pgstes */
	mm->context.has_pgste = 1;
	tlb_gather_mmu(&tlb, mm, 0, TASK_SIZE);
	page_table_realloc(&tlb, mm, 0, TASK_SIZE);
	tlb_finish_mmu(&tlb, 0, TASK_SIZE);
	up_write(&mm->mmap_sem);
	return mm->context.has_pgste ? 0 : -ENOMEM;
}
EXPORT_SYMBOL_GPL(s390_enable_sie);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_clear_flush_young(struct vm_area_struct *vma, unsigned long address,
			   pmd_t *pmdp)
{
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	/* No need to flush TLB
	 * On s390 reference bits are in storage key and never in TLB */
	return pmdp_test_and_clear_young(vma, address, pmdp);
}

int pmdp_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp,
			  pmd_t entry, int dirty)
{
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	if (pmd_same(*pmdp, entry))
		return 0;
	pmdp_invalidate(vma, address, pmdp);
	set_pmd_at(vma->vm_mm, address, pmdp, entry);
	return 1;
}

static void pmdp_splitting_flush_sync(void *arg)
{
	/* Simply deliver the interrupt */
}

void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp)
{
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	if (!test_and_set_bit(_SEGMENT_ENTRY_SPLIT_BIT,
			      (unsigned long *) pmdp)) {
		/* need to serialize against gup-fast (IRQ disabled) */
		smp_call_function(pmdp_splitting_flush_sync, NULL, 1);
	}
}

void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	struct list_head *lh = (struct list_head *) pgtable;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	if (!mm->pmd_huge_pte)
		INIT_LIST_HEAD(lh);
	else
		list_add(lh, (struct list_head *) mm->pmd_huge_pte);
	mm->pmd_huge_pte = pgtable;
}

pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	struct list_head *lh;
	pgtable_t pgtable;
	pte_t *ptep;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	pgtable = mm->pmd_huge_pte;
	lh = (struct list_head *) pgtable;
	if (list_empty(lh))
		mm->pmd_huge_pte = NULL;
	else {
		mm->pmd_huge_pte = (pgtable_t) lh->next;
		list_del(lh);
	}
	ptep = (pte_t *) pgtable;
	pte_val(*ptep) = _PAGE_INVALID;
	ptep++;
	pte_val(*ptep) = _PAGE_INVALID;
	return pgtable;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */