Linux 4.13.16
arch/s390/mm/gmap.c

/*
 * KVM guest address space mapping code
 *
 * Copyright IBM Corp. 2007, 2016
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/swapops.h>
#include <linux/ksm.h>
#include <linux/mman.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/tlb.h>

#define GMAP_SHADOW_FAKE_TABLE 1ULL

/**
 * gmap_alloc - allocate and initialize a guest address space
 * @limit: maximum address of the gmap address space
 *
 * Returns a guest address space structure.
 */
static struct gmap *gmap_alloc(unsigned long limit)
{
        struct gmap *gmap;
        struct page *page;
        unsigned long *table;
        unsigned long etype, atype;

        if (limit < (1UL << 31)) {
                limit = (1UL << 31) - 1;
                atype = _ASCE_TYPE_SEGMENT;
                etype = _SEGMENT_ENTRY_EMPTY;
        } else if (limit < (1UL << 42)) {
                limit = (1UL << 42) - 1;
                atype = _ASCE_TYPE_REGION3;
                etype = _REGION3_ENTRY_EMPTY;
        } else if (limit < (1UL << 53)) {
                limit = (1UL << 53) - 1;
                atype = _ASCE_TYPE_REGION2;
                etype = _REGION2_ENTRY_EMPTY;
        } else {
                limit = -1UL;
                atype = _ASCE_TYPE_REGION1;
                etype = _REGION1_ENTRY_EMPTY;
        }
        gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL);
        if (!gmap)
                goto out;
        INIT_LIST_HEAD(&gmap->crst_list);
        INIT_LIST_HEAD(&gmap->children);
        INIT_LIST_HEAD(&gmap->pt_list);
        INIT_RADIX_TREE(&gmap->guest_to_host, GFP_KERNEL);
        INIT_RADIX_TREE(&gmap->host_to_guest, GFP_ATOMIC);
        INIT_RADIX_TREE(&gmap->host_to_rmap, GFP_ATOMIC);
        spin_lock_init(&gmap->guest_table_lock);
        spin_lock_init(&gmap->shadow_lock);
        atomic_set(&gmap->ref_count, 1);
        page = alloc_pages(GFP_KERNEL, 2);
        if (!page)
                goto out_free;
        page->index = 0;
        list_add(&page->lru, &gmap->crst_list);
        table = (unsigned long *) page_to_phys(page);
        crst_table_init(table, etype);
        gmap->table = table;
        gmap->asce = atype | _ASCE_TABLE_LENGTH |
                _ASCE_USER_BITS | __pa(table);
        gmap->asce_end = limit;
        return gmap;

out_free:
        kfree(gmap);
out:
        return NULL;
}

/**
 * gmap_create - create a guest address space
 * @mm: pointer to the parent mm_struct
 * @limit: maximum size of the gmap address space
 *
 * Returns a guest address space structure.
 */
struct gmap *gmap_create(struct mm_struct *mm, unsigned long limit)
{
        struct gmap *gmap;
        unsigned long gmap_asce;

        gmap = gmap_alloc(limit);
        if (!gmap)
                return NULL;
        gmap->mm = mm;
        spin_lock(&mm->context.gmap_lock);
        list_add_rcu(&gmap->list, &mm->context.gmap_list);
        if (list_is_singular(&mm->context.gmap_list))
                gmap_asce = gmap->asce;
        else
                gmap_asce = -1UL;
        WRITE_ONCE(mm->context.gmap_asce, gmap_asce);
        spin_unlock(&mm->context.gmap_lock);
        return gmap;
}
EXPORT_SYMBOL_GPL(gmap_create);
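
/*
 * Usage sketch (illustrative only, not part of this file): KVM creates one
 * gmap per virtual machine and tears it down again with gmap_remove(). The
 * variable name g and the 44-bit limit are arbitrary example values.
 *
 *      struct gmap *g;
 *
 *      g = gmap_create(current->mm, (1UL << 44) - 1);
 *      if (!g)
 *              return -ENOMEM;
 *      ...
 *      gmap_remove(g);
 */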

static void gmap_flush_tlb(struct gmap *gmap)
{
        if (MACHINE_HAS_IDTE)
                __tlb_flush_idte(gmap->asce);
        else
                __tlb_flush_global();
}

static void gmap_radix_tree_free(struct radix_tree_root *root)
{
        struct radix_tree_iter iter;
        unsigned long indices[16];
        unsigned long index;
        void __rcu **slot;
        int i, nr;

        /* A radix tree is freed by deleting all of its entries */
        index = 0;
        do {
                nr = 0;
                radix_tree_for_each_slot(slot, root, &iter, index) {
                        indices[nr] = iter.index;
                        if (++nr == 16)
                                break;
                }
                for (i = 0; i < nr; i++) {
                        index = indices[i];
                        radix_tree_delete(root, index);
                }
        } while (nr > 0);
}

static void gmap_rmap_radix_tree_free(struct radix_tree_root *root)
{
        struct gmap_rmap *rmap, *rnext, *head;
        struct radix_tree_iter iter;
        unsigned long indices[16];
        unsigned long index;
        void __rcu **slot;
        int i, nr;

        /* A radix tree is freed by deleting all of its entries */
        index = 0;
        do {
                nr = 0;
                radix_tree_for_each_slot(slot, root, &iter, index) {
                        indices[nr] = iter.index;
                        if (++nr == 16)
                                break;
                }
                for (i = 0; i < nr; i++) {
                        index = indices[i];
                        head = radix_tree_delete(root, index);
                        gmap_for_each_rmap_safe(rmap, rnext, head)
                                kfree(rmap);
                }
        } while (nr > 0);
}

/**
 * gmap_free - free a guest address space
 * @gmap: pointer to the guest address space structure
 *
 * No locks required. There are no references to this gmap anymore.
 */
static void gmap_free(struct gmap *gmap)
{
        struct page *page, *next;

        /* Flush tlb of all gmaps (if not already done for shadows) */
        if (!(gmap_is_shadow(gmap) && gmap->removed))
                gmap_flush_tlb(gmap);
        /* Free all segment & region tables. */
        list_for_each_entry_safe(page, next, &gmap->crst_list, lru)
                __free_pages(page, 2);
        gmap_radix_tree_free(&gmap->guest_to_host);
        gmap_radix_tree_free(&gmap->host_to_guest);

        /* Free additional data for a shadow gmap */
        if (gmap_is_shadow(gmap)) {
                /* Free all page tables. */
                list_for_each_entry_safe(page, next, &gmap->pt_list, lru)
                        page_table_free_pgste(page);
                gmap_rmap_radix_tree_free(&gmap->host_to_rmap);
                /* Release reference to the parent */
                gmap_put(gmap->parent);
        }

        kfree(gmap);
}

/**
 * gmap_get - increase reference counter for guest address space
 * @gmap: pointer to the guest address space structure
 *
 * Returns the gmap pointer
 */
struct gmap *gmap_get(struct gmap *gmap)
{
        atomic_inc(&gmap->ref_count);
        return gmap;
}
EXPORT_SYMBOL_GPL(gmap_get);

/**
 * gmap_put - decrease reference counter for guest address space
 * @gmap: pointer to the guest address space structure
 *
 * If the reference counter reaches zero the guest address space is freed.
 */
void gmap_put(struct gmap *gmap)
{
        if (atomic_dec_return(&gmap->ref_count) == 0)
                gmap_free(gmap);
}
EXPORT_SYMBOL_GPL(gmap_put);
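
/*
 * Reference counting sketch (illustrative only, not part of this file):
 * a caller that hands the gmap to another context takes an extra
 * reference first and drops it when done; the last gmap_put() frees it.
 *
 *      struct gmap *g2 = gmap_get(g);
 *      ...
 *      gmap_put(g2);
 */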

/**
 * gmap_remove - remove a guest address space but do not free it yet
 * @gmap: pointer to the guest address space structure
 */
void gmap_remove(struct gmap *gmap)
{
        struct gmap *sg, *next;
        unsigned long gmap_asce;

        /* Remove all shadow gmaps linked to this gmap */
        if (!list_empty(&gmap->children)) {
                spin_lock(&gmap->shadow_lock);
                list_for_each_entry_safe(sg, next, &gmap->children, list) {
                        list_del(&sg->list);
                        gmap_put(sg);
                }
                spin_unlock(&gmap->shadow_lock);
        }
        /* Remove gmap from the per-mm list */
        spin_lock(&gmap->mm->context.gmap_lock);
        list_del_rcu(&gmap->list);
        if (list_empty(&gmap->mm->context.gmap_list))
                gmap_asce = 0;
        else if (list_is_singular(&gmap->mm->context.gmap_list))
                gmap_asce = list_first_entry(&gmap->mm->context.gmap_list,
                                             struct gmap, list)->asce;
        else
                gmap_asce = -1UL;
        WRITE_ONCE(gmap->mm->context.gmap_asce, gmap_asce);
        spin_unlock(&gmap->mm->context.gmap_lock);
        synchronize_rcu();
        /* Put reference */
        gmap_put(gmap);
}
EXPORT_SYMBOL_GPL(gmap_remove);

/**
 * gmap_enable - switch primary space to the guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_enable(struct gmap *gmap)
{
        S390_lowcore.gmap = (unsigned long) gmap;
}
EXPORT_SYMBOL_GPL(gmap_enable);

/**
 * gmap_disable - switch back to the standard primary address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_disable(struct gmap *gmap)
{
        S390_lowcore.gmap = 0UL;
}
EXPORT_SYMBOL_GPL(gmap_disable);

/**
 * gmap_get_enabled - get a pointer to the currently enabled gmap
 *
 * Returns a pointer to the currently enabled gmap. NULL if none is enabled.
 */
struct gmap *gmap_get_enabled(void)
{
        return (struct gmap *) S390_lowcore.gmap;
}
EXPORT_SYMBOL_GPL(gmap_get_enabled);
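
/*
 * Usage sketch (illustrative only, not part of this file): the gmap is
 * enabled while a VCPU executes guest code and disabled again afterwards,
 * e.g. around the SIE entry/exit path.
 *
 *      gmap_enable(g);
 *      ... run guest code via SIE ...
 *      gmap_disable(g);
 */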

/*
 * gmap_alloc_table is assumed to be called with mmap_sem held
 */
static int gmap_alloc_table(struct gmap *gmap, unsigned long *table,
                            unsigned long init, unsigned long gaddr)
{
        struct page *page;
        unsigned long *new;

        /* since we don't free the gmap table until gmap_free we can unlock */
        page = alloc_pages(GFP_KERNEL, 2);
        if (!page)
                return -ENOMEM;
        new = (unsigned long *) page_to_phys(page);
        crst_table_init(new, init);
        spin_lock(&gmap->guest_table_lock);
        if (*table & _REGION_ENTRY_INVALID) {
                list_add(&page->lru, &gmap->crst_list);
                *table = (unsigned long) new | _REGION_ENTRY_LENGTH |
                        (*table & _REGION_ENTRY_TYPE_MASK);
                page->index = gaddr;
                page = NULL;
        }
        spin_unlock(&gmap->guest_table_lock);
        if (page)
                __free_pages(page, 2);
        return 0;
}

/**
 * __gmap_segment_gaddr - find virtual address from segment pointer
 * @entry: pointer to a segment table entry in the guest address space
 *
 * Returns the virtual address in the guest address space for the segment
 */
static unsigned long __gmap_segment_gaddr(unsigned long *entry)
{
        struct page *page;
        unsigned long offset, mask;

        offset = (unsigned long) entry / sizeof(unsigned long);
        offset = (offset & (PTRS_PER_PMD - 1)) * PMD_SIZE;
        mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
        page = virt_to_page((void *)((unsigned long) entry & mask));
        return page->index + offset;
}

/**
 * __gmap_unlink_by_vmaddr - unlink a single segment via a host address
 * @gmap: pointer to the guest address space structure
 * @vmaddr: address in the host process address space
 *
 * Returns 1 if a TLB flush is required
 */
static int __gmap_unlink_by_vmaddr(struct gmap *gmap, unsigned long vmaddr)
{
        unsigned long *entry;
        int flush = 0;

        BUG_ON(gmap_is_shadow(gmap));
        spin_lock(&gmap->guest_table_lock);
        entry = radix_tree_delete(&gmap->host_to_guest, vmaddr >> PMD_SHIFT);
        if (entry) {
                flush = (*entry != _SEGMENT_ENTRY_EMPTY);
                *entry = _SEGMENT_ENTRY_EMPTY;
        }
        spin_unlock(&gmap->guest_table_lock);
        return flush;
}

/**
 * __gmap_unmap_by_gaddr - unmap a single segment via a guest address
 * @gmap: pointer to the guest address space structure
 * @gaddr: address in the guest address space
 *
 * Returns 1 if a TLB flush is required
 */
static int __gmap_unmap_by_gaddr(struct gmap *gmap, unsigned long gaddr)
{
        unsigned long vmaddr;

        vmaddr = (unsigned long) radix_tree_delete(&gmap->guest_to_host,
                                                   gaddr >> PMD_SHIFT);
        return vmaddr ? __gmap_unlink_by_vmaddr(gmap, vmaddr) : 0;
}

/**
 * gmap_unmap_segment - unmap segment from the guest address space
 * @gmap: pointer to the guest address space structure
 * @to: address in the guest address space
 * @len: length of the memory area to unmap
 *
 * Returns 0 if the unmap succeeded, -EINVAL if not.
 */
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
{
        unsigned long off;
        int flush;

        BUG_ON(gmap_is_shadow(gmap));
        if ((to | len) & (PMD_SIZE - 1))
                return -EINVAL;
        if (len == 0 || to + len < to)
                return -EINVAL;

        flush = 0;
        down_write(&gmap->mm->mmap_sem);
        for (off = 0; off < len; off += PMD_SIZE)
                flush |= __gmap_unmap_by_gaddr(gmap, to + off);
        up_write(&gmap->mm->mmap_sem);
        if (flush)
                gmap_flush_tlb(gmap);
        return 0;
}
EXPORT_SYMBOL_GPL(gmap_unmap_segment);

/**
 * gmap_map_segment - map a segment to the guest address space
 * @gmap: pointer to the guest address space structure
 * @from: source address in the parent address space
 * @to: target address in the guest address space
 * @len: length of the memory area to map
 *
 * Returns 0 if the mmap succeeded, -EINVAL or -ENOMEM if not.
 */
int gmap_map_segment(struct gmap *gmap, unsigned long from,
                     unsigned long to, unsigned long len)
{
        unsigned long off;
        int flush;

        BUG_ON(gmap_is_shadow(gmap));
        if ((from | to | len) & (PMD_SIZE - 1))
                return -EINVAL;
        if (len == 0 || from + len < from || to + len < to ||
            from + len - 1 > TASK_SIZE_MAX || to + len - 1 > gmap->asce_end)
                return -EINVAL;

        flush = 0;
        down_write(&gmap->mm->mmap_sem);
        for (off = 0; off < len; off += PMD_SIZE) {
                /* Remove old translation */
                flush |= __gmap_unmap_by_gaddr(gmap, to + off);
                /* Store new translation */
                if (radix_tree_insert(&gmap->guest_to_host,
                                      (to + off) >> PMD_SHIFT,
                                      (void *) from + off))
                        break;
        }
        up_write(&gmap->mm->mmap_sem);
        if (flush)
                gmap_flush_tlb(gmap);
        if (off >= len)
                return 0;
        gmap_unmap_segment(gmap, to, len);
        return -ENOMEM;
}
EXPORT_SYMBOL_GPL(gmap_map_segment);
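
/*
 * Usage sketch (illustrative only, not part of this file): make the host
 * range [from, from + len) appear at guest address @to. Addresses and
 * length must be segment (1 MB) aligned; the values below are examples.
 *
 *      unsigned long from = 0x10000000UL, to = 0x00000000UL;
 *      unsigned long len = 0x00100000UL;
 *
 *      if (gmap_map_segment(g, from, to, len))
 *              pr_err("gmap mapping failed\n");
 */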

/**
 * __gmap_translate - translate a guest address to a user space address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 *
 * Returns user space address which corresponds to the guest address or
 * -EFAULT if no such mapping exists.
 * This function does not establish potentially missing page table entries.
 * The mmap_sem of the mm that belongs to the address space must be held
 * when this function gets called.
 *
 * Note: Can also be called for shadow gmaps.
 */
unsigned long __gmap_translate(struct gmap *gmap, unsigned long gaddr)
{
        unsigned long vmaddr;

        vmaddr = (unsigned long)
                radix_tree_lookup(&gmap->guest_to_host, gaddr >> PMD_SHIFT);
        /* Note: guest_to_host is empty for a shadow gmap */
        return vmaddr ? (vmaddr | (gaddr & ~PMD_MASK)) : -EFAULT;
}
EXPORT_SYMBOL_GPL(__gmap_translate);

/**
 * gmap_translate - translate a guest address to a user space address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 *
 * Returns user space address which corresponds to the guest address or
 * -EFAULT if no such mapping exists.
 * This function does not establish potentially missing page table entries.
 */
unsigned long gmap_translate(struct gmap *gmap, unsigned long gaddr)
{
        unsigned long rc;

        down_read(&gmap->mm->mmap_sem);
        rc = __gmap_translate(gmap, gaddr);
        up_read(&gmap->mm->mmap_sem);
        return rc;
}
EXPORT_SYMBOL_GPL(gmap_translate);
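
/*
 * Usage sketch (illustrative only, not part of this file): the translation
 * returns either a host address or a negative error code in disguise, so
 * callers check the result with IS_ERR_VALUE() before using it.
 *
 *      unsigned long vmaddr;
 *
 *      vmaddr = gmap_translate(g, gaddr);
 *      if (IS_ERR_VALUE(vmaddr))
 *              return (int) vmaddr;
 */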

/**
 * gmap_unlink - disconnect a page table from the gmap shadow tables
 * @mm: pointer to the parent mm_struct
 * @table: pointer to the host page table
 * @vmaddr: vm address associated with the host page table
 */
void gmap_unlink(struct mm_struct *mm, unsigned long *table,
                 unsigned long vmaddr)
{
        struct gmap *gmap;
        int flush;

        rcu_read_lock();
        list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
                flush = __gmap_unlink_by_vmaddr(gmap, vmaddr);
                if (flush)
                        gmap_flush_tlb(gmap);
        }
        rcu_read_unlock();
}

/**
 * __gmap_link - set up shadow page tables to connect a host to a guest address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 * @vmaddr: vm address
 *
 * Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
 * if the vm address is already mapped to a different guest segment.
 * The mmap_sem of the mm that belongs to the address space must be held
 * when this function gets called.
 */
int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
{
        struct mm_struct *mm;
        unsigned long *table;
        spinlock_t *ptl;
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
        int rc;

        BUG_ON(gmap_is_shadow(gmap));
        /* Create higher level tables in the gmap page table */
        table = gmap->table;
        if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION1) {
                table += (gaddr >> 53) & 0x7ff;
                if ((*table & _REGION_ENTRY_INVALID) &&
                    gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY,
                                     gaddr & 0xffe0000000000000UL))
                        return -ENOMEM;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
        }
        if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION2) {
                table += (gaddr >> 42) & 0x7ff;
                if ((*table & _REGION_ENTRY_INVALID) &&
                    gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY,
                                     gaddr & 0xfffffc0000000000UL))
                        return -ENOMEM;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
        }
        if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION3) {
                table += (gaddr >> 31) & 0x7ff;
                if ((*table & _REGION_ENTRY_INVALID) &&
                    gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY,
                                     gaddr & 0xffffffff80000000UL))
                        return -ENOMEM;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
        }
        table += (gaddr >> 20) & 0x7ff;
        /* Walk the parent mm page table */
        mm = gmap->mm;
        pgd = pgd_offset(mm, vmaddr);
        VM_BUG_ON(pgd_none(*pgd));
        p4d = p4d_offset(pgd, vmaddr);
        VM_BUG_ON(p4d_none(*p4d));
        pud = pud_offset(p4d, vmaddr);
        VM_BUG_ON(pud_none(*pud));
        /* large puds cannot yet be handled */
        if (pud_large(*pud))
                return -EFAULT;
        pmd = pmd_offset(pud, vmaddr);
        VM_BUG_ON(pmd_none(*pmd));
        /* large pmds cannot yet be handled */
        if (pmd_large(*pmd))
                return -EFAULT;
        /* Link gmap segment table entry location to page table. */
        rc = radix_tree_preload(GFP_KERNEL);
        if (rc)
                return rc;
        ptl = pmd_lock(mm, pmd);
        spin_lock(&gmap->guest_table_lock);
        if (*table == _SEGMENT_ENTRY_EMPTY) {
                rc = radix_tree_insert(&gmap->host_to_guest,
                                       vmaddr >> PMD_SHIFT, table);
                if (!rc)
                        *table = pmd_val(*pmd);
        } else
                rc = 0;
        spin_unlock(&gmap->guest_table_lock);
        spin_unlock(ptl);
        radix_tree_preload_end();
        return rc;
}

/**
 * gmap_fault - resolve a fault on a guest address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 * @fault_flags: flags to pass down to handle_mm_fault()
 *
 * Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
 * if the vm address is already mapped to a different guest segment.
 */
int gmap_fault(struct gmap *gmap, unsigned long gaddr,
               unsigned int fault_flags)
{
        unsigned long vmaddr;
        int rc;
        bool unlocked;

        down_read(&gmap->mm->mmap_sem);

retry:
        unlocked = false;
        vmaddr = __gmap_translate(gmap, gaddr);
        if (IS_ERR_VALUE(vmaddr)) {
                rc = vmaddr;
                goto out_up;
        }
        if (fixup_user_fault(current, gmap->mm, vmaddr, fault_flags,
                             &unlocked)) {
                rc = -EFAULT;
                goto out_up;
        }
        /*
         * In the case that fixup_user_fault unlocked the mmap_sem during
         * fault-in, redo __gmap_translate to avoid racing with a
         * map/unmap_segment.
         */
        if (unlocked)
                goto retry;

        rc = __gmap_link(gmap, gaddr, vmaddr);
out_up:
        up_read(&gmap->mm->mmap_sem);
        return rc;
}
EXPORT_SYMBOL_GPL(gmap_fault);
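
/*
 * Usage sketch (illustrative only, not part of this file): a guest access
 * fault handler resolves a missing guest mapping by faulting the backing
 * host page in writable and linking it into the gmap.
 *
 *      rc = gmap_fault(g, gaddr, FAULT_FLAG_WRITE);
 *      if (rc)
 *              return rc;
 */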

/*
 * this function is assumed to be called with mmap_sem held
 */
void __gmap_zap(struct gmap *gmap, unsigned long gaddr)
{
        unsigned long vmaddr;
        spinlock_t *ptl;
        pte_t *ptep;

        /* Find the vm address for the guest address */
        vmaddr = (unsigned long) radix_tree_lookup(&gmap->guest_to_host,
                                                   gaddr >> PMD_SHIFT);
        if (vmaddr) {
                vmaddr |= gaddr & ~PMD_MASK;
                /* Get pointer to the page table entry */
                ptep = get_locked_pte(gmap->mm, vmaddr, &ptl);
                if (likely(ptep)) {
                        ptep_zap_unused(gmap->mm, vmaddr, ptep, 0);
                        pte_unmap_unlock(ptep, ptl);
                }
        }
}
EXPORT_SYMBOL_GPL(__gmap_zap);

void gmap_discard(struct gmap *gmap, unsigned long from, unsigned long to)
{
        unsigned long gaddr, vmaddr, size;
        struct vm_area_struct *vma;

        down_read(&gmap->mm->mmap_sem);
        for (gaddr = from; gaddr < to;
             gaddr = (gaddr + PMD_SIZE) & PMD_MASK) {
                /* Find the vm address for the guest address */
                vmaddr = (unsigned long)
                        radix_tree_lookup(&gmap->guest_to_host,
                                          gaddr >> PMD_SHIFT);
                if (!vmaddr)
                        continue;
                vmaddr |= gaddr & ~PMD_MASK;
                /* Find vma in the parent mm */
                vma = find_vma(gmap->mm, vmaddr);
                size = min(to - gaddr, PMD_SIZE - (gaddr & ~PMD_MASK));
                zap_page_range(vma, vmaddr, size);
        }
        up_read(&gmap->mm->mmap_sem);
}
EXPORT_SYMBOL_GPL(gmap_discard);

static LIST_HEAD(gmap_notifier_list);
static DEFINE_SPINLOCK(gmap_notifier_lock);

/**
 * gmap_register_pte_notifier - register a pte invalidation callback
 * @nb: pointer to the gmap notifier block
 */
void gmap_register_pte_notifier(struct gmap_notifier *nb)
{
        spin_lock(&gmap_notifier_lock);
        list_add_rcu(&nb->list, &gmap_notifier_list);
        spin_unlock(&gmap_notifier_lock);
}
EXPORT_SYMBOL_GPL(gmap_register_pte_notifier);

/**
 * gmap_unregister_pte_notifier - remove a pte invalidation callback
 * @nb: pointer to the gmap notifier block
 */
void gmap_unregister_pte_notifier(struct gmap_notifier *nb)
{
        spin_lock(&gmap_notifier_lock);
        list_del_rcu(&nb->list);
        spin_unlock(&gmap_notifier_lock);
        synchronize_rcu();
}
EXPORT_SYMBOL_GPL(gmap_unregister_pte_notifier);

/**
 * gmap_call_notifier - call all registered invalidation callbacks
 * @gmap: pointer to guest mapping meta data structure
 * @start: start virtual address in the guest address space
 * @end: end virtual address in the guest address space
 */
static void gmap_call_notifier(struct gmap *gmap, unsigned long start,
                               unsigned long end)
{
        struct gmap_notifier *nb;

        list_for_each_entry(nb, &gmap_notifier_list, list)
                nb->notifier_call(gmap, start, end);
}

/**
 * gmap_table_walk - walk the gmap page tables
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @level: page table level to stop at
 *
 * Returns a table entry pointer for the given guest address and @level
 * @level=0 : returns a pointer to a page table entry (or NULL)
 * @level=1 : returns a pointer to a segment table entry (or NULL)
 * @level=2 : returns a pointer to a region-3 table entry (or NULL)
 * @level=3 : returns a pointer to a region-2 table entry (or NULL)
 * @level=4 : returns a pointer to a region-1 table entry (or NULL)
 *
 * Returns NULL if the gmap page tables could not be walked to the
 * requested level.
 *
 * Note: Can also be called for shadow gmaps.
 */
static inline unsigned long *gmap_table_walk(struct gmap *gmap,
                                             unsigned long gaddr, int level)
{
        unsigned long *table;

        if ((gmap->asce & _ASCE_TYPE_MASK) + 4 < (level * 4))
                return NULL;
        if (gmap_is_shadow(gmap) && gmap->removed)
                return NULL;
        if (gaddr & (-1UL << (31 + ((gmap->asce & _ASCE_TYPE_MASK) >> 2) * 11)))
                return NULL;
        table = gmap->table;
        switch (gmap->asce & _ASCE_TYPE_MASK) {
        case _ASCE_TYPE_REGION1:
                table += (gaddr >> 53) & 0x7ff;
                if (level == 4)
                        break;
                if (*table & _REGION_ENTRY_INVALID)
                        return NULL;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                /* Fallthrough */
        case _ASCE_TYPE_REGION2:
                table += (gaddr >> 42) & 0x7ff;
                if (level == 3)
                        break;
                if (*table & _REGION_ENTRY_INVALID)
                        return NULL;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                /* Fallthrough */
        case _ASCE_TYPE_REGION3:
                table += (gaddr >> 31) & 0x7ff;
                if (level == 2)
                        break;
                if (*table & _REGION_ENTRY_INVALID)
                        return NULL;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                /* Fallthrough */
        case _ASCE_TYPE_SEGMENT:
                table += (gaddr >> 20) & 0x7ff;
                if (level == 1)
                        break;
                if (*table & _REGION_ENTRY_INVALID)
                        return NULL;
                table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
                table += (gaddr >> 12) & 0xff;
        }
        return table;
}
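
/*
 * Example (illustrative only, not part of this file): asking for the
 * segment table entry that maps guest address 0x20000000 in this gmap;
 * per the list above, @level=1 stops the walk at the segment table.
 *
 *      unsigned long *ste = gmap_table_walk(gmap, 0x20000000UL, 1);
 *
 *      if (ste && !(*ste & _SEGMENT_ENTRY_INVALID))
 *              ... inspect the entry under the appropriate locks ...
 */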

/**
 * gmap_pte_op_walk - walk the gmap page table, get the page table lock
 *                    and return the pte pointer
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @ptl: pointer to the spinlock pointer
 *
 * Returns a pointer to the locked pte for a guest address, or NULL
 *
 * Note: Can also be called for shadow gmaps.
 */
static pte_t *gmap_pte_op_walk(struct gmap *gmap, unsigned long gaddr,
                               spinlock_t **ptl)
{
        unsigned long *table;

        if (gmap_is_shadow(gmap))
                spin_lock(&gmap->guest_table_lock);
        /* Walk the gmap page table, lock and get pte pointer */
        table = gmap_table_walk(gmap, gaddr, 1); /* get segment pointer */
        if (!table || *table & _SEGMENT_ENTRY_INVALID) {
                if (gmap_is_shadow(gmap))
                        spin_unlock(&gmap->guest_table_lock);
                return NULL;
        }
        if (gmap_is_shadow(gmap)) {
                *ptl = &gmap->guest_table_lock;
                return pte_offset_map((pmd_t *) table, gaddr);
        }
        return pte_alloc_map_lock(gmap->mm, (pmd_t *) table, gaddr, ptl);
}

/**
 * gmap_pte_op_fixup - force a page in and connect the gmap page table
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @vmaddr: address in the host process address space
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 *
 * Returns 0 if the caller can retry __gmap_translate (might fail again),
 * -ENOMEM if out of memory and -EFAULT if anything goes wrong while fixing
 * up or connecting the gmap page table.
 */
static int gmap_pte_op_fixup(struct gmap *gmap, unsigned long gaddr,
                             unsigned long vmaddr, int prot)
{
        struct mm_struct *mm = gmap->mm;
        unsigned int fault_flags;
        bool unlocked = false;

        BUG_ON(gmap_is_shadow(gmap));
        fault_flags = (prot == PROT_WRITE) ? FAULT_FLAG_WRITE : 0;
        if (fixup_user_fault(current, mm, vmaddr, fault_flags, &unlocked))
                return -EFAULT;
        if (unlocked)
                /* lost mmap_sem, caller has to retry __gmap_translate */
                return 0;
        /* Connect the page tables */
        return __gmap_link(gmap, gaddr, vmaddr);
}

/**
 * gmap_pte_op_end - release the page table lock
 * @ptl: pointer to the spinlock pointer
 */
static void gmap_pte_op_end(spinlock_t *ptl)
{
        spin_unlock(ptl);
}

/**
 * gmap_protect_range - remove access rights to memory and set pgste bits
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @len: size of area
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 * @bits: pgste notification bits to set
 *
 * Returns 0 if successfully protected, -ENOMEM if out of memory and
 * -EFAULT if gaddr is invalid (or mapping for shadows is missing).
 *
 * Called with sg->mm->mmap_sem in read.
 *
 * Note: Can also be called for shadow gmaps.
 */
static int gmap_protect_range(struct gmap *gmap, unsigned long gaddr,
                              unsigned long len, int prot, unsigned long bits)
{
        unsigned long vmaddr;
        spinlock_t *ptl;
        pte_t *ptep;
        int rc;

        while (len) {
                rc = -EAGAIN;
                ptep = gmap_pte_op_walk(gmap, gaddr, &ptl);
                if (ptep) {
                        rc = ptep_force_prot(gmap->mm, gaddr, ptep, prot, bits);
                        gmap_pte_op_end(ptl);
                }
                if (rc) {
                        vmaddr = __gmap_translate(gmap, gaddr);
                        if (IS_ERR_VALUE(vmaddr))
                                return vmaddr;
                        rc = gmap_pte_op_fixup(gmap, gaddr, vmaddr, prot);
                        if (rc)
                                return rc;
                        continue;
                }
                gaddr += PAGE_SIZE;
                len -= PAGE_SIZE;
        }
        return 0;
}

/**
 * gmap_mprotect_notify - change access rights for a range of ptes and
 *                        call the notifier if any pte changes again
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @len: size of area
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 *
 * Returns 0 if for each page in the given range a gmap mapping exists,
 * the new access rights could be set and the notifier could be armed.
 * If the gmap mapping is missing for one or more pages -EFAULT is
 * returned. If no memory could be allocated -ENOMEM is returned.
 * This function establishes missing page table entries.
 */
int gmap_mprotect_notify(struct gmap *gmap, unsigned long gaddr,
                         unsigned long len, int prot)
{
        int rc;

        if ((gaddr & ~PAGE_MASK) || (len & ~PAGE_MASK) || gmap_is_shadow(gmap))
                return -EINVAL;
        if (!MACHINE_HAS_ESOP && prot == PROT_READ)
                return -EINVAL;
        down_read(&gmap->mm->mmap_sem);
        rc = gmap_protect_range(gmap, gaddr, len, prot, PGSTE_IN_BIT);
        up_read(&gmap->mm->mmap_sem);
        return rc;
}
EXPORT_SYMBOL_GPL(gmap_mprotect_notify);
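
/*
 * Usage sketch (illustrative only, not part of this file): a consumer such
 * as KVM registers a notifier once and then write-protects the guest pages
 * it wants to be told about; the callback name below is a made-up example.
 *
 *      static struct gmap_notifier my_notifier = {
 *              .notifier_call = my_gmap_notifier_call,
 *      };
 *
 *      gmap_register_pte_notifier(&my_notifier);
 *      rc = gmap_mprotect_notify(g, gaddr, PAGE_SIZE, PROT_READ);
 */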

/**
 * gmap_read_table - get an unsigned long value from a guest page table using
 *                   absolute addressing, without marking the page referenced.
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @val: pointer to the unsigned long value to return
 *
 * Returns 0 if the value was read, -ENOMEM if out of memory and -EFAULT
 * if reading using the virtual address failed.
 *
 * Called with gmap->mm->mmap_sem in read.
 */
int gmap_read_table(struct gmap *gmap, unsigned long gaddr, unsigned long *val)
{
        unsigned long address, vmaddr;
        spinlock_t *ptl;
        pte_t *ptep, pte;
        int rc;

        while (1) {
                rc = -EAGAIN;
                ptep = gmap_pte_op_walk(gmap, gaddr, &ptl);
                if (ptep) {
                        pte = *ptep;
                        if (pte_present(pte) && (pte_val(pte) & _PAGE_READ)) {
                                address = pte_val(pte) & PAGE_MASK;
                                address += gaddr & ~PAGE_MASK;
                                *val = *(unsigned long *) address;
                                pte_val(*ptep) |= _PAGE_YOUNG;
                                /* Do *NOT* clear the _PAGE_INVALID bit! */
                                rc = 0;
                        }
                        gmap_pte_op_end(ptl);
                }
                if (!rc)
                        break;
                vmaddr = __gmap_translate(gmap, gaddr);
                if (IS_ERR_VALUE(vmaddr)) {
                        rc = vmaddr;
                        break;
                }
                rc = gmap_pte_op_fixup(gmap, gaddr, vmaddr, PROT_READ);
                if (rc)
                        break;
        }
        return rc;
}
EXPORT_SYMBOL_GPL(gmap_read_table);
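
/*
 * Usage sketch (illustrative only, not part of this file): read one word
 * from a guest page table, e.g. while walking a nested guest's tables; the
 * caller holds mmap_sem as required by the function's documentation.
 *
 *      unsigned long entry;
 *
 *      down_read(&g->mm->mmap_sem);
 *      rc = gmap_read_table(g, gaddr, &entry);
 *      up_read(&g->mm->mmap_sem);
 */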

/**
 * gmap_insert_rmap - add a rmap to the host_to_rmap radix tree
 * @sg: pointer to the shadow guest address space structure
 * @vmaddr: vm address associated with the rmap
 * @rmap: pointer to the rmap structure
 *
 * Called with the sg->guest_table_lock
 */
static inline void gmap_insert_rmap(struct gmap *sg, unsigned long vmaddr,
                                    struct gmap_rmap *rmap)
{
        void __rcu **slot;

        BUG_ON(!gmap_is_shadow(sg));
        slot = radix_tree_lookup_slot(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT);
        if (slot) {
                rmap->next = radix_tree_deref_slot_protected(slot,
                                                        &sg->guest_table_lock);
                radix_tree_replace_slot(&sg->host_to_rmap, slot, rmap);
        } else {
                rmap->next = NULL;
                radix_tree_insert(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT,
                                  rmap);
        }
}

/**
 * gmap_protect_rmap - modify access rights to memory and create an rmap
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow gmap
 * @paddr: address in the parent guest address space
 * @len: length of the memory area to protect
 * @prot: indicates access rights: none, read-only or read-write
 *
 * Returns 0 if successfully protected and the rmap was created, -ENOMEM
 * if out of memory and -EFAULT if paddr is invalid.
 */
static int gmap_protect_rmap(struct gmap *sg, unsigned long raddr,
                             unsigned long paddr, unsigned long len, int prot)
{
        struct gmap *parent;
        struct gmap_rmap *rmap;
        unsigned long vmaddr;
        spinlock_t *ptl;
        pte_t *ptep;
        int rc;

        BUG_ON(!gmap_is_shadow(sg));
        parent = sg->parent;
        while (len) {
                vmaddr = __gmap_translate(parent, paddr);
                if (IS_ERR_VALUE(vmaddr))
                        return vmaddr;
                rmap = kzalloc(sizeof(*rmap), GFP_KERNEL);
                if (!rmap)
                        return -ENOMEM;
                rmap->raddr = raddr;
                rc = radix_tree_preload(GFP_KERNEL);
                if (rc) {
                        kfree(rmap);
                        return rc;
                }
                rc = -EAGAIN;
                ptep = gmap_pte_op_walk(parent, paddr, &ptl);
                if (ptep) {
                        spin_lock(&sg->guest_table_lock);
                        rc = ptep_force_prot(parent->mm, paddr, ptep, prot,
                                             PGSTE_VSIE_BIT);
                        if (!rc)
                                gmap_insert_rmap(sg, vmaddr, rmap);
                        spin_unlock(&sg->guest_table_lock);
                        gmap_pte_op_end(ptl);
                }
                radix_tree_preload_end();
                if (rc) {
                        kfree(rmap);
                        rc = gmap_pte_op_fixup(parent, paddr, vmaddr, prot);
                        if (rc)
                                return rc;
                        continue;
                }
                paddr += PAGE_SIZE;
                len -= PAGE_SIZE;
        }
        return 0;
}

#define _SHADOW_RMAP_MASK       0x7
#define _SHADOW_RMAP_REGION1    0x5
#define _SHADOW_RMAP_REGION2    0x4
#define _SHADOW_RMAP_REGION3    0x3
#define _SHADOW_RMAP_SEGMENT    0x2
#define _SHADOW_RMAP_PGTABLE    0x1

/**
 * gmap_idte_one - invalidate a single region or segment table entry
 * @asce: region or segment table *origin* + table-type bits
 * @vaddr: virtual address to identify the table entry to flush
 *
 * The invalid bit of a single region or segment table entry is set
 * and the associated TLB entries depending on the entry are flushed.
 * The table-type of the @asce identifies the portion of the @vaddr
 * that is used as the invalidation index.
 */
static inline void gmap_idte_one(unsigned long asce, unsigned long vaddr)
{
        asm volatile(
                "       .insn   rrf,0xb98e0000,%0,%1,0,0"
                : : "a" (asce), "a" (vaddr) : "cc", "memory");
}

/**
 * gmap_unshadow_page - remove a page from a shadow page table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_page(struct gmap *sg, unsigned long raddr)
{
        unsigned long *table;

        BUG_ON(!gmap_is_shadow(sg));
        table = gmap_table_walk(sg, raddr, 0); /* get page table pointer */
        if (!table || *table & _PAGE_INVALID)
                return;
        gmap_call_notifier(sg, raddr, raddr + (1UL << 12) - 1);
        ptep_unshadow_pte(sg->mm, raddr, (pte_t *) table);
}

/**
 * __gmap_unshadow_pgt - remove all entries from a shadow page table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 * @pgt: pointer to the start of a shadow page table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_pgt(struct gmap *sg, unsigned long raddr,
                                unsigned long *pgt)
{
        int i;

        BUG_ON(!gmap_is_shadow(sg));
        for (i = 0; i < 256; i++, raddr += 1UL << 12)
                pgt[i] = _PAGE_INVALID;
}

/**
 * gmap_unshadow_pgt - remove a shadow page table from a segment entry
 * @sg: pointer to the shadow guest address space structure
 * @raddr: address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_pgt(struct gmap *sg, unsigned long raddr)
{
        unsigned long sto, *ste, *pgt;
        struct page *page;

        BUG_ON(!gmap_is_shadow(sg));
        ste = gmap_table_walk(sg, raddr, 1); /* get segment pointer */
        if (!ste || !(*ste & _SEGMENT_ENTRY_ORIGIN))
                return;
        gmap_call_notifier(sg, raddr, raddr + (1UL << 20) - 1);
        sto = (unsigned long) (ste - ((raddr >> 20) & 0x7ff));
        gmap_idte_one(sto | _ASCE_TYPE_SEGMENT, raddr);
        pgt = (unsigned long *)(*ste & _SEGMENT_ENTRY_ORIGIN);
        *ste = _SEGMENT_ENTRY_EMPTY;
        __gmap_unshadow_pgt(sg, raddr, pgt);
        /* Free page table */
        page = pfn_to_page(__pa(pgt) >> PAGE_SHIFT);
        list_del(&page->lru);
        page_table_free_pgste(page);
}

/**
 * __gmap_unshadow_sgt - remove all entries from a shadow segment table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 * @sgt: pointer to the start of a shadow segment table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr,
                                unsigned long *sgt)
{
        unsigned long asce, *pgt;
        struct page *page;
        int i;

        BUG_ON(!gmap_is_shadow(sg));
        asce = (unsigned long) sgt | _ASCE_TYPE_SEGMENT;
        for (i = 0; i < 2048; i++, raddr += 1UL << 20) {
                if (!(sgt[i] & _SEGMENT_ENTRY_ORIGIN))
                        continue;
                pgt = (unsigned long *)(sgt[i] & _REGION_ENTRY_ORIGIN);
                sgt[i] = _SEGMENT_ENTRY_EMPTY;
                __gmap_unshadow_pgt(sg, raddr, pgt);
                /* Free page table */
                page = pfn_to_page(__pa(pgt) >> PAGE_SHIFT);
                list_del(&page->lru);
                page_table_free_pgste(page);
        }
}

/**
 * gmap_unshadow_sgt - remove a shadow segment table from a region-3 entry
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr)
{
        unsigned long r3o, *r3e, *sgt;
        struct page *page;

        BUG_ON(!gmap_is_shadow(sg));
        r3e = gmap_table_walk(sg, raddr, 2); /* get region-3 pointer */
        if (!r3e || !(*r3e & _REGION_ENTRY_ORIGIN))
                return;
        gmap_call_notifier(sg, raddr, raddr + (1UL << 31) - 1);
        r3o = (unsigned long) (r3e - ((raddr >> 31) & 0x7ff));
        gmap_idte_one(r3o | _ASCE_TYPE_REGION3, raddr);
        sgt = (unsigned long *)(*r3e & _REGION_ENTRY_ORIGIN);
        *r3e = _REGION3_ENTRY_EMPTY;
        __gmap_unshadow_sgt(sg, raddr, sgt);
        /* Free segment table */
        page = pfn_to_page(__pa(sgt) >> PAGE_SHIFT);
        list_del(&page->lru);
        __free_pages(page, 2);
}

/**
 * __gmap_unshadow_r3t - remove all entries from a shadow region-3 table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: address in the shadow guest address space
 * @r3t: pointer to the start of a shadow region-3 table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr,
                                unsigned long *r3t)
{
        unsigned long asce, *sgt;
        struct page *page;
        int i;

        BUG_ON(!gmap_is_shadow(sg));
        asce = (unsigned long) r3t | _ASCE_TYPE_REGION3;
        for (i = 0; i < 2048; i++, raddr += 1UL << 31) {
                if (!(r3t[i] & _REGION_ENTRY_ORIGIN))
                        continue;
                sgt = (unsigned long *)(r3t[i] & _REGION_ENTRY_ORIGIN);
                r3t[i] = _REGION3_ENTRY_EMPTY;
                __gmap_unshadow_sgt(sg, raddr, sgt);
                /* Free segment table */
                page = pfn_to_page(__pa(sgt) >> PAGE_SHIFT);
                list_del(&page->lru);
                __free_pages(page, 2);
        }
}

/**
 * gmap_unshadow_r3t - remove a shadow region-3 table from a region-2 entry
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr)
{
        unsigned long r2o, *r2e, *r3t;
        struct page *page;

        BUG_ON(!gmap_is_shadow(sg));
        r2e = gmap_table_walk(sg, raddr, 3); /* get region-2 pointer */
        if (!r2e || !(*r2e & _REGION_ENTRY_ORIGIN))
                return;
        gmap_call_notifier(sg, raddr, raddr + (1UL << 42) - 1);
        r2o = (unsigned long) (r2e - ((raddr >> 42) & 0x7ff));
        gmap_idte_one(r2o | _ASCE_TYPE_REGION2, raddr);
        r3t = (unsigned long *)(*r2e & _REGION_ENTRY_ORIGIN);
        *r2e = _REGION2_ENTRY_EMPTY;
        __gmap_unshadow_r3t(sg, raddr, r3t);
        /* Free region 3 table */
        page = pfn_to_page(__pa(r3t) >> PAGE_SHIFT);
        list_del(&page->lru);
        __free_pages(page, 2);
}

/**
 * __gmap_unshadow_r2t - remove all entries from a shadow region-2 table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 * @r2t: pointer to the start of a shadow region-2 table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr,
                                unsigned long *r2t)
{
        unsigned long asce, *r3t;
        struct page *page;
        int i;

        BUG_ON(!gmap_is_shadow(sg));
        asce = (unsigned long) r2t | _ASCE_TYPE_REGION2;
        for (i = 0; i < 2048; i++, raddr += 1UL << 42) {
                if (!(r2t[i] & _REGION_ENTRY_ORIGIN))
                        continue;
                r3t = (unsigned long *)(r2t[i] & _REGION_ENTRY_ORIGIN);
                r2t[i] = _REGION2_ENTRY_EMPTY;
                __gmap_unshadow_r3t(sg, raddr, r3t);
                /* Free region 3 table */
                page = pfn_to_page(__pa(r3t) >> PAGE_SHIFT);
                list_del(&page->lru);
                __free_pages(page, 2);
        }
}

/**
 * gmap_unshadow_r2t - remove a shadow region-2 table from a region-1 entry
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr)
{
        unsigned long r1o, *r1e, *r2t;
        struct page *page;

        BUG_ON(!gmap_is_shadow(sg));
        r1e = gmap_table_walk(sg, raddr, 4); /* get region-1 pointer */
        if (!r1e || !(*r1e & _REGION_ENTRY_ORIGIN))
                return;
        gmap_call_notifier(sg, raddr, raddr + (1UL << 53) - 1);
        r1o = (unsigned long) (r1e - ((raddr >> 53) & 0x7ff));
        gmap_idte_one(r1o | _ASCE_TYPE_REGION1, raddr);
        r2t = (unsigned long *)(*r1e & _REGION_ENTRY_ORIGIN);
        *r1e = _REGION1_ENTRY_EMPTY;
        __gmap_unshadow_r2t(sg, raddr, r2t);
        /* Free region 2 table */
        page = pfn_to_page(__pa(r2t) >> PAGE_SHIFT);
        list_del(&page->lru);
        __free_pages(page, 2);
}

/**
 * __gmap_unshadow_r1t - remove all entries from a shadow region-1 table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 * @r1t: pointer to the start of a shadow region-1 table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_r1t(struct gmap *sg, unsigned long raddr,
                                unsigned long *r1t)
{
        unsigned long asce, *r2t;
        struct page *page;
        int i;

        BUG_ON(!gmap_is_shadow(sg));
        asce = (unsigned long) r1t | _ASCE_TYPE_REGION1;
        for (i = 0; i < 2048; i++, raddr += 1UL << 53) {
                if (!(r1t[i] & _REGION_ENTRY_ORIGIN))
                        continue;
                r2t = (unsigned long *)(r1t[i] & _REGION_ENTRY_ORIGIN);
                __gmap_unshadow_r2t(sg, raddr, r2t);
                /* Clear entry and flush translation r1t -> r2t */
                gmap_idte_one(asce, raddr);
                r1t[i] = _REGION1_ENTRY_EMPTY;
                /* Free region 2 table */
                page = pfn_to_page(__pa(r2t) >> PAGE_SHIFT);
                list_del(&page->lru);
                __free_pages(page, 2);
        }
}

/**
 * gmap_unshadow - remove a shadow page table completely
 * @sg: pointer to the shadow guest address space structure
 *
 * Called with sg->guest_table_lock
 */
static void gmap_unshadow(struct gmap *sg)
{
        unsigned long *table;

        BUG_ON(!gmap_is_shadow(sg));
        if (sg->removed)
                return;
        sg->removed = 1;
        gmap_call_notifier(sg, 0, -1UL);
        gmap_flush_tlb(sg);
        table = (unsigned long *)(sg->asce & _ASCE_ORIGIN);
        switch (sg->asce & _ASCE_TYPE_MASK) {
        case _ASCE_TYPE_REGION1:
                __gmap_unshadow_r1t(sg, 0, table);
                break;
        case _ASCE_TYPE_REGION2:
                __gmap_unshadow_r2t(sg, 0, table);
                break;
        case _ASCE_TYPE_REGION3:
                __gmap_unshadow_r3t(sg, 0, table);
                break;
        case _ASCE_TYPE_SEGMENT:
                __gmap_unshadow_sgt(sg, 0, table);
                break;
        }
}

/**
 * gmap_find_shadow - find a specific asce in the list of shadow tables
 * @parent: pointer to the parent gmap
 * @asce: ASCE for which the shadow table is created
 * @edat_level: edat level to be used for the shadow translation
 *
 * Returns the pointer to a gmap if a shadow table with the given asce is
 * already available, ERR_PTR(-EAGAIN) if another one is just being created,
 * otherwise NULL
 */
static struct gmap *gmap_find_shadow(struct gmap *parent, unsigned long asce,
                                     int edat_level)
{
        struct gmap *sg;

        list_for_each_entry(sg, &parent->children, list) {
                if (sg->orig_asce != asce || sg->edat_level != edat_level ||
                    sg->removed)
                        continue;
                if (!sg->initialized)
                        return ERR_PTR(-EAGAIN);
                atomic_inc(&sg->ref_count);
                return sg;
        }
        return NULL;
}

/**
 * gmap_shadow_valid - check if a shadow guest address space matches the
 *                     given properties and is still valid
 * @sg: pointer to the shadow guest address space structure
 * @asce: ASCE for which the shadow table is requested
 * @edat_level: edat level to be used for the shadow translation
 *
 * Returns 1 if the gmap shadow is still valid and matches the given
 * properties, the caller can continue using it. Returns 0 otherwise, the
 * caller has to request a new shadow gmap in this case.
 */
int gmap_shadow_valid(struct gmap *sg, unsigned long asce, int edat_level)
{
        if (sg->removed)
                return 0;
        return sg->orig_asce == asce && sg->edat_level == edat_level;
}
EXPORT_SYMBOL_GPL(gmap_shadow_valid);

/**
 * gmap_shadow - create/find a shadow guest address space
 * @parent: pointer to the parent gmap
 * @asce: ASCE for which the shadow table is created
 * @edat_level: edat level to be used for the shadow translation
 *
 * The pages of the top level page table referred by the asce parameter
 * will be set to read-only and marked in the PGSTEs of the kvm process.
 * The shadow table will be removed automatically on any change to the
 * PTE mapping for the source table.
 *
 * Returns a guest address space structure, ERR_PTR(-ENOMEM) if out of memory,
 * ERR_PTR(-EAGAIN) if the caller has to retry and ERR_PTR(-EFAULT) if the
 * parent gmap table could not be protected.
 */
struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce,
                         int edat_level)
{
        struct gmap *sg, *new;
        unsigned long limit;
        int rc;

        BUG_ON(gmap_is_shadow(parent));
        spin_lock(&parent->shadow_lock);
        sg = gmap_find_shadow(parent, asce, edat_level);
        spin_unlock(&parent->shadow_lock);
        if (sg)
                return sg;
        /* Create a new shadow gmap */
        limit = -1UL >> (33 - (((asce & _ASCE_TYPE_MASK) >> 2) * 11));
        if (asce & _ASCE_REAL_SPACE)
                limit = -1UL;
        new = gmap_alloc(limit);
        if (!new)
                return ERR_PTR(-ENOMEM);
        new->mm = parent->mm;
        new->parent = gmap_get(parent);
        new->orig_asce = asce;
        new->edat_level = edat_level;
        new->initialized = false;
        spin_lock(&parent->shadow_lock);
        /* Recheck if another CPU created the same shadow */
        sg = gmap_find_shadow(parent, asce, edat_level);
        if (sg) {
                spin_unlock(&parent->shadow_lock);
                gmap_free(new);
                return sg;
        }
        if (asce & _ASCE_REAL_SPACE) {
                /* only allow one real-space gmap shadow */
                list_for_each_entry(sg, &parent->children, list) {
                        if (sg->orig_asce & _ASCE_REAL_SPACE) {
                                spin_lock(&sg->guest_table_lock);
                                gmap_unshadow(sg);
                                spin_unlock(&sg->guest_table_lock);
                                list_del(&sg->list);
                                gmap_put(sg);
                                break;
                        }
                }
        }
        atomic_set(&new->ref_count, 2);
        list_add(&new->list, &parent->children);
        if (asce & _ASCE_REAL_SPACE) {
                /* nothing to protect, return right away */
                new->initialized = true;
                spin_unlock(&parent->shadow_lock);
                return new;
        }
        spin_unlock(&parent->shadow_lock);
        /* protect after insertion, so it will get properly invalidated */
        down_read(&parent->mm->mmap_sem);
        rc = gmap_protect_range(parent, asce & _ASCE_ORIGIN,
                                ((asce & _ASCE_TABLE_LENGTH) + 1) * 4096,
                                PROT_READ, PGSTE_VSIE_BIT);
        up_read(&parent->mm->mmap_sem);
        spin_lock(&parent->shadow_lock);
        new->initialized = true;
        if (rc) {
                list_del(&new->list);
                gmap_free(new);
                new = ERR_PTR(rc);
        }
        spin_unlock(&parent->shadow_lock);
        return new;
}
EXPORT_SYMBOL_GPL(gmap_shadow);
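
/*
 * Usage sketch (illustrative only, not part of this file): the VSIE code
 * looks up or creates a shadow for the nested guest's ASCE and later drops
 * its reference again; parent, asce and edat_level come from the caller.
 *
 *      struct gmap *sg;
 *
 *      sg = gmap_shadow(parent, asce, edat_level);
 *      if (IS_ERR(sg))
 *              return PTR_ERR(sg);
 *      ...
 *      gmap_put(sg);
 */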

/**
 * gmap_shadow_r2t - create an empty shadow region 2 table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @r2t: parent gmap address of the region 2 table to get shadowed
 * @fake: r2t references contiguous guest memory block, not a r2t
 *
 * The r2t parameter specifies the address of the source table. The
 * four pages of the source table are made read-only in the parent gmap
 * address space. A write to the source table area @r2t will automatically
 * remove the shadow r2 table and all of its descendants.
 *
 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t,
                    int fake)
{
        unsigned long raddr, origin, offset, len;
        unsigned long *s_r2t, *table;
        struct page *page;
        int rc;

        BUG_ON(!gmap_is_shadow(sg));
        /* Allocate a shadow region second table */
        page = alloc_pages(GFP_KERNEL, 2);
        if (!page)
                return -ENOMEM;
        page->index = r2t & _REGION_ENTRY_ORIGIN;
        if (fake)
                page->index |= GMAP_SHADOW_FAKE_TABLE;
        s_r2t = (unsigned long *) page_to_phys(page);
        /* Install shadow region second table */
        spin_lock(&sg->guest_table_lock);
        table = gmap_table_walk(sg, saddr, 4); /* get region-1 pointer */
        if (!table) {
                rc = -EAGAIN;           /* Race with unshadow */
                goto out_free;
        }
        if (!(*table & _REGION_ENTRY_INVALID)) {
                rc = 0;                 /* Already established */
                goto out_free;
        } else if (*table & _REGION_ENTRY_ORIGIN) {
                rc = -EAGAIN;           /* Race with shadow */
                goto out_free;
        }
        crst_table_init(s_r2t, _REGION2_ENTRY_EMPTY);
        /* mark as invalid as long as the parent table is not protected */
        *table = (unsigned long) s_r2t | _REGION_ENTRY_LENGTH |
                 _REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID;
        if (sg->edat_level >= 1)
                *table |= (r2t & _REGION_ENTRY_PROTECT);
        list_add(&page->lru, &sg->crst_list);
        if (fake) {
                /* nothing to protect for fake tables */
                *table &= ~_REGION_ENTRY_INVALID;
                spin_unlock(&sg->guest_table_lock);
                return 0;
        }
        spin_unlock(&sg->guest_table_lock);
        /* Make r2t read-only in parent gmap page table */
        raddr = (saddr & 0xffe0000000000000UL) | _SHADOW_RMAP_REGION1;
        origin = r2t & _REGION_ENTRY_ORIGIN;
        offset = ((r2t & _REGION_ENTRY_OFFSET) >> 6) * 4096;
        len = ((r2t & _REGION_ENTRY_LENGTH) + 1) * 4096 - offset;
        rc = gmap_protect_rmap(sg, raddr, origin + offset, len, PROT_READ);
        spin_lock(&sg->guest_table_lock);
        if (!rc) {
                table = gmap_table_walk(sg, saddr, 4);
                if (!table || (*table & _REGION_ENTRY_ORIGIN) !=
                    (unsigned long) s_r2t)
                        rc = -EAGAIN;   /* Race with unshadow */
                else
                        *table &= ~_REGION_ENTRY_INVALID;
        } else {
                gmap_unshadow_r2t(sg, raddr);
        }
        spin_unlock(&sg->guest_table_lock);
        return rc;
out_free:
        spin_unlock(&sg->guest_table_lock);
        __free_pages(page, 2);
        return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_r2t);

/**
 * gmap_shadow_r3t - create a shadow region 3 table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @r3t: parent gmap address of the region 3 table to get shadowed
 * @fake: r3t references contiguous guest memory block, not a r3t
 *
 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t,
                    int fake)
{
        unsigned long raddr, origin, offset, len;
        unsigned long *s_r3t, *table;
        struct page *page;
        int rc;

        BUG_ON(!gmap_is_shadow(sg));
        /* Allocate a shadow region third table */
        page = alloc_pages(GFP_KERNEL, 2);
        if (!page)
                return -ENOMEM;
        page->index = r3t & _REGION_ENTRY_ORIGIN;
        if (fake)
                page->index |= GMAP_SHADOW_FAKE_TABLE;
        s_r3t = (unsigned long *) page_to_phys(page);
        /* Install shadow region third table */
        spin_lock(&sg->guest_table_lock);
        table = gmap_table_walk(sg, saddr, 3); /* get region-2 pointer */
        if (!table) {
                rc = -EAGAIN;           /* Race with unshadow */
                goto out_free;
        }
        if (!(*table & _REGION_ENTRY_INVALID)) {
                rc = 0;                 /* Already established */
                goto out_free;
        } else if (*table & _REGION_ENTRY_ORIGIN) {
                rc = -EAGAIN;           /* Race with shadow */
                goto out_free;
        }
        crst_table_init(s_r3t, _REGION3_ENTRY_EMPTY);
        /* mark as invalid as long as the parent table is not protected */
        *table = (unsigned long) s_r3t | _REGION_ENTRY_LENGTH |
                 _REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID;
        if (sg->edat_level >= 1)
                *table |= (r3t & _REGION_ENTRY_PROTECT);
        list_add(&page->lru, &sg->crst_list);
        if (fake) {
                /* nothing to protect for fake tables */
                *table &= ~_REGION_ENTRY_INVALID;
                spin_unlock(&sg->guest_table_lock);
                return 0;
        }
        spin_unlock(&sg->guest_table_lock);
        /* Make r3t read-only in parent gmap page table */
        raddr = (saddr & 0xfffffc0000000000UL) | _SHADOW_RMAP_REGION2;
        origin = r3t & _REGION_ENTRY_ORIGIN;
        offset = ((r3t & _REGION_ENTRY_OFFSET) >> 6) * 4096;
        len = ((r3t & _REGION_ENTRY_LENGTH) + 1) * 4096 - offset;
        rc = gmap_protect_rmap(sg, raddr, origin + offset, len, PROT_READ);
        spin_lock(&sg->guest_table_lock);
        if (!rc) {
                table = gmap_table_walk(sg, saddr, 3);
                if (!table || (*table & _REGION_ENTRY_ORIGIN) !=
                    (unsigned long) s_r3t)
                        rc = -EAGAIN;   /* Race with unshadow */
                else
                        *table &= ~_REGION_ENTRY_INVALID;
        } else {
                gmap_unshadow_r3t(sg, raddr);
        }
        spin_unlock(&sg->guest_table_lock);
        return rc;
out_free:
        spin_unlock(&sg->guest_table_lock);
        __free_pages(page, 2);
        return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_r3t);

/**
 * gmap_shadow_sgt - create a shadow segment table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @sgt: parent gmap address of the segment table to get shadowed
 * @fake: sgt references contiguous guest memory block, not a sgt
 *
 * Returns: 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt,
                    int fake)
{
        unsigned long raddr, origin, offset, len;
        unsigned long *s_sgt, *table;
        struct page *page;
        int rc;

        BUG_ON(!gmap_is_shadow(sg) || (sgt & _REGION3_ENTRY_LARGE));
        /* Allocate a shadow segment table */
        page = alloc_pages(GFP_KERNEL, 2);
        if (!page)
                return -ENOMEM;
        page->index = sgt & _REGION_ENTRY_ORIGIN;
        if (fake)
                page->index |= GMAP_SHADOW_FAKE_TABLE;
        s_sgt = (unsigned long *) page_to_phys(page);
        /* Install shadow segment table */
        spin_lock(&sg->guest_table_lock);
        table = gmap_table_walk(sg, saddr, 2); /* get region-3 pointer */
        if (!table) {
                rc = -EAGAIN;           /* Race with unshadow */
                goto out_free;
        }
        if (!(*table & _REGION_ENTRY_INVALID)) {
                rc = 0;                 /* Already established */
                goto out_free;
        } else if (*table & _REGION_ENTRY_ORIGIN) {
                rc = -EAGAIN;           /* Race with shadow */
                goto out_free;
        }
        crst_table_init(s_sgt, _SEGMENT_ENTRY_EMPTY);
        /* mark as invalid as long as the parent table is not protected */
        *table = (unsigned long) s_sgt | _REGION_ENTRY_LENGTH |
                 _REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID;
        if (sg->edat_level >= 1)
                *table |= sgt & _REGION_ENTRY_PROTECT;
        list_add(&page->lru, &sg->crst_list);
        if (fake) {
                /* nothing to protect for fake tables */
                *table &= ~_REGION_ENTRY_INVALID;
                spin_unlock(&sg->guest_table_lock);
                return 0;
        }
        spin_unlock(&sg->guest_table_lock);
        /* Make sgt read-only in parent gmap page table */
        raddr = (saddr & 0xffffffff80000000UL) | _SHADOW_RMAP_REGION3;
        origin = sgt & _REGION_ENTRY_ORIGIN;
        offset = ((sgt & _REGION_ENTRY_OFFSET) >> 6) * 4096;
        len = ((sgt & _REGION_ENTRY_LENGTH) + 1) * 4096 - offset;
        rc = gmap_protect_rmap(sg, raddr, origin + offset, len, PROT_READ);
        spin_lock(&sg->guest_table_lock);
        if (!rc) {
                table = gmap_table_walk(sg, saddr, 2);
                if (!table || (*table & _REGION_ENTRY_ORIGIN) !=
                    (unsigned long) s_sgt)
                        rc = -EAGAIN;   /* Race with unshadow */
                else
                        *table &= ~_REGION_ENTRY_INVALID;
        } else {
                gmap_unshadow_sgt(sg, raddr);
        }
        spin_unlock(&sg->guest_table_lock);
        return rc;
out_free:
        spin_unlock(&sg->guest_table_lock);
        __free_pages(page, 2);
        return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_sgt);
1810 * gmap_shadow_lookup_pgtable - find a shadow page table
1811 * @sg: pointer to the shadow guest address space structure
1812 * @saddr: the address in the shadow aguest address space
1813 * @pgt: parent gmap address of the page table to get shadowed
1814 * @dat_protection: if the pgtable is marked as protected by dat
1815 * @fake: pgt references contiguous guest memory block, not a pgtable
1817 * Returns 0 if the shadow page table was found and -EAGAIN if the page
1818 * table was not found.
1820 * Called with sg->mm->mmap_sem in read.
1821 */
1822 int gmap_shadow_pgt_lookup(struct gmap *sg, unsigned long saddr,
1823 unsigned long *pgt, int *dat_protection,
1824 int *fake)
1825 {
1826 unsigned long *table;
1827 struct page *page;
1828 int rc;
1830 BUG_ON(!gmap_is_shadow(sg));
1831 spin_lock(&sg->guest_table_lock);
1832 table = gmap_table_walk(sg, saddr, 1); /* get segment pointer */
1833 if (table && !(*table & _SEGMENT_ENTRY_INVALID)) {
1834 /* Shadow page tables are full pages (pte+pgste) */
1835 page = pfn_to_page(*table >> PAGE_SHIFT);
1836 *pgt = page->index & ~GMAP_SHADOW_FAKE_TABLE;
1837 *dat_protection = !!(*table & _SEGMENT_ENTRY_PROTECT);
1838 *fake = !!(page->index & GMAP_SHADOW_FAKE_TABLE);
1839 rc = 0;
1840 } else {
1841 rc = -EAGAIN;
1842 }
1843 spin_unlock(&sg->guest_table_lock);
1844 return rc;
1845 }
1847 EXPORT_SYMBOL_GPL(gmap_shadow_pgt_lookup);
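/*
 * Typical use (a sketch; the real caller is KVM's VSIE shadow fault
 * handler, e.g. kvm_s390_shadow_fault() in arch/s390/kvm/gaccess.c, not
 * code in this file): try the lookup first and only build the missing
 * table levels when it fails:
 *
 *	rc = gmap_shadow_pgt_lookup(sg, saddr, &pgt, &dat_protection, &fake);
 *	if (rc)
 *		rc = ... build the hierarchy with gmap_shadow_r2t/r3t/sgt/pgt ...;
 *	if (!rc)
 *		... read the guest pte at pgt and map the final page
 *		    with gmap_shadow_page() ...
 */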
1849 /**
1850 * gmap_shadow_pgt - instantiate a shadow page table
1851 * @sg: pointer to the shadow guest address space structure
1852 * @saddr: faulting address in the shadow gmap
1853 * @pgt: parent gmap address of the page table to get shadowed
1854 * @fake: pgt references a contiguous guest memory block, not a pgtable
1856 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
1857 * shadow table structure is incomplete, -ENOMEM if out of memory,
1858 * -EFAULT if an address in the parent gmap could not be resolved.
1860 * Called with gmap->mm->mmap_sem in read.
1861 */
1862 int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt,
1863 int fake)
1864 {
1865 unsigned long raddr, origin;
1866 unsigned long *s_pgt, *table;
1867 struct page *page;
1868 int rc;
1870 BUG_ON(!gmap_is_shadow(sg) || (pgt & _SEGMENT_ENTRY_LARGE));
1871 /* Allocate a shadow page table */
1872 page = page_table_alloc_pgste(sg->mm);
1873 if (!page)
1874 return -ENOMEM;
1875 page->index = pgt & _SEGMENT_ENTRY_ORIGIN;
1876 if (fake)
1877 page->index |= GMAP_SHADOW_FAKE_TABLE;
1878 s_pgt = (unsigned long *) page_to_phys(page);
1879 /* Install shadow page table */
1880 spin_lock(&sg->guest_table_lock);
1881 table = gmap_table_walk(sg, saddr, 1); /* get segment pointer */
1882 if (!table) {
1883 rc = -EAGAIN; /* Race with unshadow */
1884 goto out_free;
1885 }
1886 if (!(*table & _SEGMENT_ENTRY_INVALID)) {
1887 rc = 0; /* Already established */
1888 goto out_free;
1889 } else if (*table & _SEGMENT_ENTRY_ORIGIN) {
1890 rc = -EAGAIN; /* Race with shadow */
1891 goto out_free;
1892 }
1893 /* mark as invalid as long as the parent table is not protected */
1894 *table = (unsigned long) s_pgt | _SEGMENT_ENTRY |
1895 (pgt & _SEGMENT_ENTRY_PROTECT) | _SEGMENT_ENTRY_INVALID;
1896 list_add(&page->lru, &sg->pt_list);
1897 if (fake) {
1898 /* nothing to protect for fake tables */
1899 *table &= ~_SEGMENT_ENTRY_INVALID;
1900 spin_unlock(&sg->guest_table_lock);
1901 return 0;
1902 }
1903 spin_unlock(&sg->guest_table_lock);
1904 /* Make pgt read-only in parent gmap page table (not the pgste) */
1905 raddr = (saddr & 0xfffffffffff00000UL) | _SHADOW_RMAP_SEGMENT;
1906 origin = pgt & _SEGMENT_ENTRY_ORIGIN & PAGE_MASK;
1907 rc = gmap_protect_rmap(sg, raddr, origin, PAGE_SIZE, PROT_READ);
1908 spin_lock(&sg->guest_table_lock);
1909 if (!rc) {
1910 table = gmap_table_walk(sg, saddr, 1);
1911 if (!table || (*table & _SEGMENT_ENTRY_ORIGIN) !=
1912 (unsigned long) s_pgt)
1913 rc = -EAGAIN; /* Race with unshadow */
1914 else
1915 *table &= ~_SEGMENT_ENTRY_INVALID;
1916 } else {
1917 gmap_unshadow_pgt(sg, raddr);
1918 }
1919 spin_unlock(&sg->guest_table_lock);
1920 return rc;
1921 out_free:
1922 spin_unlock(&sg->guest_table_lock);
1923 page_table_free_pgste(page);
1924 return rc;
1926 }
1927 EXPORT_SYMBOL_GPL(gmap_shadow_pgt);
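/*
 * Note on the ordering in gmap_shadow_pgt() and gmap_shadow_sgt() above:
 * the new shadow entry is installed invalid first, the guest table is
 * then write-protected in the parent gmap with guest_table_lock dropped,
 * and the entry is only validated after re-walking the shadow table and
 * checking that it still points to the table just allocated.  A racing
 * unshadow in the unlocked window is thus detected and answered with
 * -EAGAIN, which makes the caller retry the whole fault.
 */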
1929 /**
1930 * gmap_shadow_page - create a shadow page mapping
1931 * @sg: pointer to the shadow guest address space structure
1932 * @saddr: faulting address in the shadow gmap
1933 * @pte: pte in parent gmap address space to get shadowed
1935 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
1936 * shadow table structure is incomplete, -ENOMEM if out of memory and
1937 * -EFAULT if an address in the parent gmap could not be resolved.
1939 * Called with sg->mm->mmap_sem in read.
1940 */
1941 int gmap_shadow_page(struct gmap *sg, unsigned long saddr, pte_t pte)
1942 {
1943 struct gmap *parent;
1944 struct gmap_rmap *rmap;
1945 unsigned long vmaddr, paddr;
1946 spinlock_t *ptl;
1947 pte_t *sptep, *tptep;
1948 int prot;
1949 int rc;
1951 BUG_ON(!gmap_is_shadow(sg));
1952 parent = sg->parent;
1953 prot = (pte_val(pte) & _PAGE_PROTECT) ? PROT_READ : PROT_WRITE;
1955 rmap = kzalloc(sizeof(*rmap), GFP_KERNEL);
1956 if (!rmap)
1957 return -ENOMEM;
1958 rmap->raddr = (saddr & PAGE_MASK) | _SHADOW_RMAP_PGTABLE;
1960 while (1) {
1961 paddr = pte_val(pte) & PAGE_MASK;
1962 vmaddr = __gmap_translate(parent, paddr);
1963 if (IS_ERR_VALUE(vmaddr)) {
1964 rc = vmaddr;
1965 break;
1966 }
1967 rc = radix_tree_preload(GFP_KERNEL);
1968 if (rc)
1969 break;
1970 rc = -EAGAIN;
1971 sptep = gmap_pte_op_walk(parent, paddr, &ptl);
1972 if (sptep) {
1973 spin_lock(&sg->guest_table_lock);
1974 /* Get page table pointer */
1975 tptep = (pte_t *) gmap_table_walk(sg, saddr, 0);
1976 if (!tptep) {
1977 spin_unlock(&sg->guest_table_lock);
1978 gmap_pte_op_end(ptl);
1979 radix_tree_preload_end();
1980 break;
1981 }
1982 rc = ptep_shadow_pte(sg->mm, saddr, sptep, tptep, pte);
1983 if (rc > 0) {
1984 /* Success and a new mapping */
1985 gmap_insert_rmap(sg, vmaddr, rmap);
1986 rmap = NULL;
1987 rc = 0;
1988 }
1989 gmap_pte_op_end(ptl);
1990 spin_unlock(&sg->guest_table_lock);
1991 }
1992 radix_tree_preload_end();
1993 if (!rc)
1994 break;
1995 rc = gmap_pte_op_fixup(parent, paddr, vmaddr, prot);
1996 if (rc)
1997 break;
1998 }
1999 kfree(rmap);
2000 return rc;
2001 }
2002 EXPORT_SYMBOL_GPL(gmap_shadow_page);
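/*
 * The while (1) loop above is the usual fixup pattern of this file: take
 * the fast path under the page table locks and, if the parent pte is not
 * yet mapped with the required protection, drop all locks, resolve the
 * situation with gmap_pte_op_fixup() (which triggers a host fault) and
 * start over.  prot is derived from the pte to be shadowed: a read-only
 * shadow pte only requires PROT_READ on the parent side.
 */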
2004 /**
2005 * gmap_shadow_notify - handle notifications for shadow gmap
2007 * Called with sg->parent->shadow_lock.
2008 */
2009 static void gmap_shadow_notify(struct gmap *sg, unsigned long vmaddr,
2010 unsigned long gaddr, pte_t *pte)
2011 {
2012 struct gmap_rmap *rmap, *rnext, *head;
2013 unsigned long start, end, bits, raddr;
2015 BUG_ON(!gmap_is_shadow(sg));
2017 spin_lock(&sg->guest_table_lock);
2018 if (sg->removed) {
2019 spin_unlock(&sg->guest_table_lock);
2020 return;
2021 }
2022 /* Check for top level table */
2023 start = sg->orig_asce & _ASCE_ORIGIN;
2024 end = start + ((sg->orig_asce & _ASCE_TABLE_LENGTH) + 1) * 4096;
2025 if (!(sg->orig_asce & _ASCE_REAL_SPACE) && gaddr >= start &&
2026 gaddr < end) {
2027 /* The complete shadow table has to go */
2028 gmap_unshadow(sg);
2029 spin_unlock(&sg->guest_table_lock);
2030 list_del(&sg->list);
2031 gmap_put(sg);
2032 return;
2033 }
2034 /* Remove the page table tree from one specific entry */
2035 head = radix_tree_delete(&sg->host_to_rmap, vmaddr >> 12);
2036 gmap_for_each_rmap_safe(rmap, rnext, head) {
2037 bits = rmap->raddr & _SHADOW_RMAP_MASK;
2038 raddr = rmap->raddr ^ bits;
2039 switch (bits) {
2040 case _SHADOW_RMAP_REGION1:
2041 gmap_unshadow_r2t(sg, raddr);
2042 break;
2043 case _SHADOW_RMAP_REGION2:
2044 gmap_unshadow_r3t(sg, raddr);
2045 break;
2046 case _SHADOW_RMAP_REGION3:
2047 gmap_unshadow_sgt(sg, raddr);
2048 break;
2049 case _SHADOW_RMAP_SEGMENT:
2050 gmap_unshadow_pgt(sg, raddr);
2051 break;
2052 case _SHADOW_RMAP_PGTABLE:
2053 gmap_unshadow_page(sg, raddr);
2054 break;
2055 }
2056 kfree(rmap);
2057 }
2058 spin_unlock(&sg->guest_table_lock);
2059 }
2061 /**
2062 * ptep_notify - call all invalidation callbacks for a specific pte.
2063 * @mm: pointer to the process mm_struct
2064 * @vmaddr: virtual address in the process address space
2065 * @pte: pointer to the page table entry
2066 * @bits: bits from the pgste that caused the notify call
2068 * This function is assumed to be called with the page table lock held
2069 * for the pte to notify.
2070 */
2071 void ptep_notify(struct mm_struct *mm, unsigned long vmaddr,
2072 pte_t *pte, unsigned long bits)
2073 {
2074 unsigned long offset, gaddr = 0;
2075 unsigned long *table;
2076 struct gmap *gmap, *sg, *next;
2078 offset = ((unsigned long) pte) & (255 * sizeof(pte_t));
2079 offset = offset * (4096 / sizeof(pte_t));
2080 rcu_read_lock();
2081 list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
2082 spin_lock(&gmap->guest_table_lock);
2083 table = radix_tree_lookup(&gmap->host_to_guest,
2084 vmaddr >> PMD_SHIFT);
2085 if (table)
2086 gaddr = __gmap_segment_gaddr(table) + offset;
2087 spin_unlock(&gmap->guest_table_lock);
2088 if (!table)
2089 continue;
2091 if (!list_empty(&gmap->children) && (bits & PGSTE_VSIE_BIT)) {
2092 spin_lock(&gmap->shadow_lock);
2093 list_for_each_entry_safe(sg, next,
2094 &gmap->children, list)
2095 gmap_shadow_notify(sg, vmaddr, gaddr, pte);
2096 spin_unlock(&gmap->shadow_lock);
2097 }
2098 if (bits & PGSTE_IN_BIT)
2099 gmap_call_notifier(gmap, gaddr, gaddr + PAGE_SIZE - 1);
2100 }
2101 rcu_read_unlock();
2102 }
2103 EXPORT_SYMBOL_GPL(ptep_notify);
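/*
 * Worked example for the offset arithmetic in ptep_notify() (not part of
 * the original source): a page table holds 256 8-byte ptes, so the mask
 * 255 * sizeof(pte_t) extracts the byte offset of the pte within its 2K
 * table.  Each pte maps 4096 bytes, hence the scale factor of
 * 4096 / sizeof(pte_t) == 512: a pte at byte offset 0x18 is entry 3 of
 * its table, and 0x18 * 512 == 0x3000 is the offset of the notified page
 * within the 1M segment returned by __gmap_segment_gaddr().
 */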
2105 static inline void thp_split_mm(struct mm_struct *mm)
2106 {
2107 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
2108 struct vm_area_struct *vma;
2109 unsigned long addr;
2111 for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) {
2112 for (addr = vma->vm_start;
2113 addr < vma->vm_end;
2114 addr += PAGE_SIZE)
2115 follow_page(vma, addr, FOLL_SPLIT);
2116 vma->vm_flags &= ~VM_HUGEPAGE;
2117 vma->vm_flags |= VM_NOHUGEPAGE;
2118 }
2119 mm->def_flags |= VM_NOHUGEPAGE;
2120 #endif
2121 }
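/*
 * Rationale, not spelled out in the source: pgstes exist only for 4K
 * page table entries, so an mm that is to run guests cannot use
 * transparent huge pages.  FOLL_SPLIT splits every THP that already
 * exists, and VM_NOHUGEPAGE on the vmas and in mm->def_flags keeps
 * khugepaged from collapsing new ones later.
 */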
2123 /*
2124 * Remove all empty zero pages from the mapping for lazy refaulting
2125 * - This must be called after mm->context.has_pgste is set, to avoid
2126 * future creation of zero pages
2127 * - This must be called after THP was split and disabled for this mm
2128 */
2129 static int __zap_zero_pages(pmd_t *pmd, unsigned long start,
2130 unsigned long end, struct mm_walk *walk)
2131 {
2132 unsigned long addr;
2134 for (addr = start; addr != end; addr += PAGE_SIZE) {
2135 pte_t *ptep;
2136 spinlock_t *ptl;
2138 ptep = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
2139 if (is_zero_pfn(pte_pfn(*ptep)))
2140 ptep_xchg_direct(walk->mm, addr, ptep, __pte(_PAGE_INVALID));
2141 pte_unmap_unlock(ptep, ptl);
2142 }
2143 return 0;
2144 }
2146 static inline void zap_zero_pages(struct mm_struct *mm)
2147 {
2148 struct mm_walk walk = { .pmd_entry = __zap_zero_pages };
2150 walk.mm = mm;
2151 walk_page_range(0, TASK_SIZE, &walk);
2152 }
2154 /*
2155 * Switch on pgstes for the current userspace process (for KVM).
2156 */
2157 int s390_enable_sie(void)
2158 {
2159 struct mm_struct *mm = current->mm;
2161 /* Do we have pgstes? if yes, we are done */
2162 if (mm_has_pgste(mm))
2163 return 0;
2164 /* Fail if the page tables are 2K */
2165 if (!mm_alloc_pgste(mm))
2166 return -EINVAL;
2167 down_write(&mm->mmap_sem);
2168 mm->context.has_pgste = 1;
2169 /* split thp mappings and disable thp for future mappings */
2170 thp_split_mm(mm);
2171 zap_zero_pages(mm);
2172 up_write(&mm->mmap_sem);
2173 return 0;
2174 }
2175 EXPORT_SYMBOL_GPL(s390_enable_sie);
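/*
 * Usage sketch (the call site is in KVM, not in this file; typically VM
 * creation in arch/s390/kvm/kvm-s390.c):
 *
 *	rc = s390_enable_sie();
 *	if (rc)
 *		return rc;
 *
 * This only succeeds if the mm was set up to use full 4K page tables
 * (mm_alloc_pgste()): the lower 2K holds the 256 ptes, the upper 2K the
 * matching pgstes needed by SIE.
 */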
2177 /*
2178 * Enable storage key handling from now on and initialize the storage
2179 * keys with the default key.
2180 */
2181 static int __s390_enable_skey(pte_t *pte, unsigned long addr,
2182 unsigned long next, struct mm_walk *walk)
2183 {
2184 /* Clear storage key */
2185 ptep_zap_key(walk->mm, addr, pte);
2186 return 0;
2187 }
2189 int s390_enable_skey(void)
2190 {
2191 struct mm_walk walk = { .pte_entry = __s390_enable_skey };
2192 struct mm_struct *mm = current->mm;
2193 struct vm_area_struct *vma;
2194 int rc = 0;
2196 down_write(&mm->mmap_sem);
2197 if (mm_use_skey(mm))
2198 goto out_up;
2200 mm->context.use_skey = 1;
2201 for (vma = mm->mmap; vma; vma = vma->vm_next) {
2202 if (ksm_madvise(vma, vma->vm_start, vma->vm_end,
2203 MADV_UNMERGEABLE, &vma->vm_flags)) {
2204 mm->context.use_skey = 0;
2205 rc = -ENOMEM;
2206 goto out_up;
2207 }
2208 }
2209 mm->def_flags &= ~VM_MERGEABLE;
2211 walk.mm = mm;
2212 walk_page_range(0, TASK_SIZE, &walk);
2214 out_up:
2215 up_write(&mm->mmap_sem);
2216 return rc;
2217 }
2218 EXPORT_SYMBOL_GPL(s390_enable_skey);
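/*
 * Why KSM is switched off above: storage keys are a property of the
 * physical page frame, so two guest pages merged into one frame would
 * have to share a single key.  ksm_madvise(MADV_UNMERGEABLE) unmerges
 * already shared pages, clearing VM_MERGEABLE prevents new merging, and
 * the page walk then resets every key to the default via ptep_zap_key().
 */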
2220 /*
2221 * Reset CMMA state, make all pages stable again.
2222 */
2223 static int __s390_reset_cmma(pte_t *pte, unsigned long addr,
2224 unsigned long next, struct mm_walk *walk)
2225 {
2226 ptep_zap_unused(walk->mm, addr, pte, 1);
2227 return 0;
2228 }
2230 void s390_reset_cmma(struct mm_struct *mm)
2231 {
2232 struct mm_walk walk = { .pte_entry = __s390_reset_cmma };
2234 down_write(&mm->mmap_sem);
2235 walk.mm = mm;
2236 walk_page_range(0, TASK_SIZE, &walk);
2237 up_write(&mm->mmap_sem);
2238 }
2239 EXPORT_SYMBOL_GPL(s390_reset_cmma);
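/*
 * Usage sketch (the caller lives in KVM, e.g. the handler for the
 * KVM_S390_VM_MEM_CLR_CMMA attribute, not code in this file):
 *
 *	s390_reset_cmma(kvm->arch.gmap->mm);
 *
 * The walk calls ptep_zap_unused() with the reset flag set, which
 * discards pages the guest has marked unused and returns every page's
 * CMMA state to stable.
 */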