// SPDX-License-Identifier: GPL-2.0
/*
 * KVM guest address space mapping code
 *
 * Copyright IBM Corp. 2007, 2016
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/swapops.h>
#include <linux/ksm.h>
#include <linux/mman.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/tlb.h>

#define GMAP_SHADOW_FAKE_TABLE 1ULL
/**
 * gmap_alloc - allocate and initialize a guest address space
 * @limit: maximum address of the gmap address space
 *
 * Returns a guest address space structure.
 */
static struct gmap *gmap_alloc(unsigned long limit)
{
	struct gmap *gmap;
	struct page *page;
	unsigned long *table;
	unsigned long etype, atype;

	if (limit < _REGION3_SIZE) {
		limit = _REGION3_SIZE - 1;
		atype = _ASCE_TYPE_SEGMENT;
		etype = _SEGMENT_ENTRY_EMPTY;
	} else if (limit < _REGION2_SIZE) {
		limit = _REGION2_SIZE - 1;
		atype = _ASCE_TYPE_REGION3;
		etype = _REGION3_ENTRY_EMPTY;
	} else if (limit < _REGION1_SIZE) {
		limit = _REGION1_SIZE - 1;
		atype = _ASCE_TYPE_REGION2;
		etype = _REGION2_ENTRY_EMPTY;
	} else {
		limit = -1UL;
		atype = _ASCE_TYPE_REGION1;
		etype = _REGION1_ENTRY_EMPTY;
	}
	gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL);
	if (!gmap)
		goto out;
	INIT_LIST_HEAD(&gmap->crst_list);
	INIT_LIST_HEAD(&gmap->children);
	INIT_LIST_HEAD(&gmap->pt_list);
	INIT_RADIX_TREE(&gmap->guest_to_host, GFP_KERNEL);
	INIT_RADIX_TREE(&gmap->host_to_guest, GFP_ATOMIC);
	INIT_RADIX_TREE(&gmap->host_to_rmap, GFP_ATOMIC);
	spin_lock_init(&gmap->guest_table_lock);
	spin_lock_init(&gmap->shadow_lock);
	atomic_set(&gmap->ref_count, 1);
	page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
	if (!page)
		goto out_free;
	page->index = 0;
	list_add(&page->lru, &gmap->crst_list);
	table = (unsigned long *) page_to_phys(page);
	crst_table_init(table, etype);
	gmap->table = table;
	gmap->asce = atype | _ASCE_TABLE_LENGTH |
		_ASCE_USER_BITS | __pa(table);
	gmap->asce_end = limit;
	return gmap;

out_free:
	kfree(gmap);
out:
	return NULL;
}
/**
 * gmap_create - create a guest address space
 * @mm: pointer to the parent mm_struct
 * @limit: maximum size of the gmap address space
 *
 * Returns a guest address space structure.
 */
struct gmap *gmap_create(struct mm_struct *mm, unsigned long limit)
{
	struct gmap *gmap;
	unsigned long gmap_asce;

	gmap = gmap_alloc(limit);
	if (!gmap)
		return NULL;
	gmap->mm = mm;
	spin_lock(&mm->context.lock);
	list_add_rcu(&gmap->list, &mm->context.gmap_list);
	if (list_is_singular(&mm->context.gmap_list))
		gmap_asce = gmap->asce;
	else
		gmap_asce = -1UL;
	WRITE_ONCE(mm->context.gmap_asce, gmap_asce);
	spin_unlock(&mm->context.lock);
	return gmap;
}
EXPORT_SYMBOL_GPL(gmap_create);
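
/*
 * Usage sketch (illustrative only; the mm pointer, the 4 TB limit and the
 * error handling are made-up example values, not taken from a caller in
 * this file):
 *
 *	struct gmap *gmap;
 *
 *	gmap = gmap_create(current->mm, (1UL << 42) - 1);
 *	if (!gmap)
 *		return -ENOMEM;
 *	...use the gmap...
 *	gmap_remove(gmap);	// unlink and drop the initial reference
 */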
static void gmap_flush_tlb(struct gmap *gmap)
{
	if (MACHINE_HAS_IDTE)
		__tlb_flush_idte(gmap->asce);
	else
		__tlb_flush_global();
}

static void gmap_radix_tree_free(struct radix_tree_root *root)
{
	struct radix_tree_iter iter;
	unsigned long indices[16];
	unsigned long index;
	void __rcu **slot;
	int i, nr;

	/* A radix tree is freed by deleting all of its entries */
	index = 0;
	do {
		nr = 0;
		radix_tree_for_each_slot(slot, root, &iter, index) {
			indices[nr] = iter.index;
			if (++nr == 16)
				break;
		}
		for (i = 0; i < nr; i++) {
			index = indices[i];
			radix_tree_delete(root, index);
		}
	} while (nr > 0);
}
static void gmap_rmap_radix_tree_free(struct radix_tree_root *root)
{
	struct gmap_rmap *rmap, *rnext, *head;
	struct radix_tree_iter iter;
	unsigned long indices[16];
	unsigned long index;
	void __rcu **slot;
	int i, nr;

	/* A radix tree is freed by deleting all of its entries */
	index = 0;
	do {
		nr = 0;
		radix_tree_for_each_slot(slot, root, &iter, index) {
			indices[nr] = iter.index;
			if (++nr == 16)
				break;
		}
		for (i = 0; i < nr; i++) {
			index = indices[i];
			head = radix_tree_delete(root, index);
			gmap_for_each_rmap_safe(rmap, rnext, head)
				kfree(rmap);
		}
	} while (nr > 0);
}
/**
 * gmap_free - free a guest address space
 * @gmap: pointer to the guest address space structure
 *
 * No locks required. There are no references to this gmap anymore.
 */
static void gmap_free(struct gmap *gmap)
{
	struct page *page, *next;

	/* Flush tlb of all gmaps (if not already done for shadows) */
	if (!(gmap_is_shadow(gmap) && gmap->removed))
		gmap_flush_tlb(gmap);
	/* Free all segment & region tables. */
	list_for_each_entry_safe(page, next, &gmap->crst_list, lru)
		__free_pages(page, CRST_ALLOC_ORDER);
	gmap_radix_tree_free(&gmap->guest_to_host);
	gmap_radix_tree_free(&gmap->host_to_guest);

	/* Free additional data for a shadow gmap */
	if (gmap_is_shadow(gmap)) {
		/* Free all page tables. */
		list_for_each_entry_safe(page, next, &gmap->pt_list, lru)
			page_table_free_pgste(page);
		gmap_rmap_radix_tree_free(&gmap->host_to_rmap);
		/* Release reference to the parent */
		gmap_put(gmap->parent);
	}

	kfree(gmap);
}
/**
 * gmap_get - increase reference counter for guest address space
 * @gmap: pointer to the guest address space structure
 *
 * Returns the gmap pointer
 */
struct gmap *gmap_get(struct gmap *gmap)
{
	atomic_inc(&gmap->ref_count);
	return gmap;
}
EXPORT_SYMBOL_GPL(gmap_get);

/**
 * gmap_put - decrease reference counter for guest address space
 * @gmap: pointer to the guest address space structure
 *
 * If the reference counter reaches zero the guest address space is freed.
 */
void gmap_put(struct gmap *gmap)
{
	if (atomic_dec_return(&gmap->ref_count) == 0)
		gmap_free(gmap);
}
EXPORT_SYMBOL_GPL(gmap_put);
/**
 * gmap_remove - remove a guest address space but do not free it yet
 * @gmap: pointer to the guest address space structure
 */
void gmap_remove(struct gmap *gmap)
{
	struct gmap *sg, *next;
	unsigned long gmap_asce;

	/* Remove all shadow gmaps linked to this gmap */
	if (!list_empty(&gmap->children)) {
		spin_lock(&gmap->shadow_lock);
		list_for_each_entry_safe(sg, next, &gmap->children, list) {
			list_del(&sg->list);
			gmap_put(sg);
		}
		spin_unlock(&gmap->shadow_lock);
	}
	/* Remove gmap from the per-mm list */
	spin_lock(&gmap->mm->context.lock);
	list_del_rcu(&gmap->list);
	if (list_empty(&gmap->mm->context.gmap_list))
		gmap_asce = 0;
	else if (list_is_singular(&gmap->mm->context.gmap_list))
		gmap_asce = list_first_entry(&gmap->mm->context.gmap_list,
					     struct gmap, list)->asce;
	else
		gmap_asce = -1UL;
	WRITE_ONCE(gmap->mm->context.gmap_asce, gmap_asce);
	spin_unlock(&gmap->mm->context.lock);
	synchronize_rcu();
	/* Put reference */
	gmap_put(gmap);
}
EXPORT_SYMBOL_GPL(gmap_remove);
/**
 * gmap_enable - switch primary space to the guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_enable(struct gmap *gmap)
{
	S390_lowcore.gmap = (unsigned long) gmap;
}
EXPORT_SYMBOL_GPL(gmap_enable);

/**
 * gmap_disable - switch back to the standard primary address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_disable(struct gmap *gmap)
{
	S390_lowcore.gmap = 0UL;
}
EXPORT_SYMBOL_GPL(gmap_disable);

/**
 * gmap_get_enabled - get a pointer to the currently enabled gmap
 *
 * Returns a pointer to the currently enabled gmap. 0 if none is enabled.
 */
struct gmap *gmap_get_enabled(void)
{
	return (struct gmap *) S390_lowcore.gmap;
}
EXPORT_SYMBOL_GPL(gmap_get_enabled);
/*
 * gmap_alloc_table is assumed to be called with mmap_sem held
 */
static int gmap_alloc_table(struct gmap *gmap, unsigned long *table,
			    unsigned long init, unsigned long gaddr)
{
	struct page *page;
	unsigned long *new;

	/* since we don't free the gmap table until gmap_free we can unlock */
	page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
	if (!page)
		return -ENOMEM;
	new = (unsigned long *) page_to_phys(page);
	crst_table_init(new, init);
	spin_lock(&gmap->guest_table_lock);
	if (*table & _REGION_ENTRY_INVALID) {
		list_add(&page->lru, &gmap->crst_list);
		*table = (unsigned long) new | _REGION_ENTRY_LENGTH |
			(*table & _REGION_ENTRY_TYPE_MASK);
		page->index = gaddr;
		page = NULL;
	}
	spin_unlock(&gmap->guest_table_lock);
	if (page)
		__free_pages(page, CRST_ALLOC_ORDER);
	return 0;
}
/**
 * __gmap_segment_gaddr - find virtual address from segment pointer
 * @entry: pointer to a segment table entry in the guest address space
 *
 * Returns the virtual address in the guest address space for the segment
 */
static unsigned long __gmap_segment_gaddr(unsigned long *entry)
{
	struct page *page;
	unsigned long offset, mask;

	offset = (unsigned long) entry / sizeof(unsigned long);
	offset = (offset & (PTRS_PER_PMD - 1)) * PMD_SIZE;
	mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
	page = virt_to_page((void *)((unsigned long) entry & mask));
	return page->index + offset;
}
/**
 * __gmap_unlink_by_vmaddr - unlink a single segment via a host address
 * @gmap: pointer to the guest address space structure
 * @vmaddr: address in the host process address space
 *
 * Returns 1 if a TLB flush is required
 */
static int __gmap_unlink_by_vmaddr(struct gmap *gmap, unsigned long vmaddr)
{
	unsigned long *entry;
	int flush = 0;

	BUG_ON(gmap_is_shadow(gmap));
	spin_lock(&gmap->guest_table_lock);
	entry = radix_tree_delete(&gmap->host_to_guest, vmaddr >> PMD_SHIFT);
	if (entry) {
		flush = (*entry != _SEGMENT_ENTRY_EMPTY);
		*entry = _SEGMENT_ENTRY_EMPTY;
	}
	spin_unlock(&gmap->guest_table_lock);
	return flush;
}

/**
 * __gmap_unmap_by_gaddr - unmap a single segment via a guest address
 * @gmap: pointer to the guest address space structure
 * @gaddr: address in the guest address space
 *
 * Returns 1 if a TLB flush is required
 */
static int __gmap_unmap_by_gaddr(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long vmaddr;

	vmaddr = (unsigned long) radix_tree_delete(&gmap->guest_to_host,
						   gaddr >> PMD_SHIFT);
	return vmaddr ? __gmap_unlink_by_vmaddr(gmap, vmaddr) : 0;
}
/**
 * gmap_unmap_segment - unmap segment from the guest address space
 * @gmap: pointer to the guest address space structure
 * @to: address in the guest address space
 * @len: length of the memory area to unmap
 *
 * Returns 0 if the unmap succeeded, -EINVAL if not.
 */
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
{
	unsigned long off;
	int flush;

	BUG_ON(gmap_is_shadow(gmap));
	if ((to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || to + len < to)
		return -EINVAL;

	flush = 0;
	down_write(&gmap->mm->mmap_sem);
	for (off = 0; off < len; off += PMD_SIZE)
		flush |= __gmap_unmap_by_gaddr(gmap, to + off);
	up_write(&gmap->mm->mmap_sem);
	if (flush)
		gmap_flush_tlb(gmap);
	return 0;
}
EXPORT_SYMBOL_GPL(gmap_unmap_segment);
/**
 * gmap_map_segment - map a segment to the guest address space
 * @gmap: pointer to the guest address space structure
 * @from: source address in the parent address space
 * @to: target address in the guest address space
 * @len: length of the memory area to map
 *
 * Returns 0 if the mmap succeeded, -EINVAL or -ENOMEM if not.
 */
int gmap_map_segment(struct gmap *gmap, unsigned long from,
		     unsigned long to, unsigned long len)
{
	unsigned long off;
	int flush;

	BUG_ON(gmap_is_shadow(gmap));
	if ((from | to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || from + len < from || to + len < to ||
	    from + len - 1 > TASK_SIZE_MAX || to + len - 1 > gmap->asce_end)
		return -EINVAL;

	flush = 0;
	down_write(&gmap->mm->mmap_sem);
	for (off = 0; off < len; off += PMD_SIZE) {
		/* Remove old translation */
		flush |= __gmap_unmap_by_gaddr(gmap, to + off);
		/* Store new translation */
		if (radix_tree_insert(&gmap->guest_to_host,
				      (to + off) >> PMD_SHIFT,
				      (void *) from + off))
			break;
	}
	up_write(&gmap->mm->mmap_sem);
	if (flush)
		gmap_flush_tlb(gmap);
	if (off >= len)
		return 0;
	gmap_unmap_segment(gmap, to, len);
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(gmap_map_segment);
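
/*
 * Usage sketch (illustrative only; gmap and host_base are made-up example
 * values): making 16 MB of the parent address space visible at guest
 * address 0. All addresses and the length must be segment (PMD) aligned.
 *
 *	unsigned long host_base = 0x10000000UL;	// hypothetical, PMD aligned
 *
 *	if (gmap_map_segment(gmap, host_base, 0x0UL, 16UL << 20))
 *		return -ENOMEM;
 *	...
 *	gmap_unmap_segment(gmap, 0x0UL, 16UL << 20);
 */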
/**
 * __gmap_translate - translate a guest address to a user space address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 *
 * Returns user space address which corresponds to the guest address or
 * -EFAULT if no such mapping exists.
 * This function does not establish potentially missing page table entries.
 * The mmap_sem of the mm that belongs to the address space must be held
 * when this function gets called.
 *
 * Note: Can also be called for shadow gmaps.
 */
unsigned long __gmap_translate(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long vmaddr;

	vmaddr = (unsigned long)
		radix_tree_lookup(&gmap->guest_to_host, gaddr >> PMD_SHIFT);
	/* Note: guest_to_host is empty for a shadow gmap */
	return vmaddr ? (vmaddr | (gaddr & ~PMD_MASK)) : -EFAULT;
}
EXPORT_SYMBOL_GPL(__gmap_translate);
/**
 * gmap_translate - translate a guest address to a user space address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 *
 * Returns user space address which corresponds to the guest address or
 * -EFAULT if no such mapping exists.
 * This function does not establish potentially missing page table entries.
 */
unsigned long gmap_translate(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long rc;

	down_read(&gmap->mm->mmap_sem);
	rc = __gmap_translate(gmap, gaddr);
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_translate);
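
/*
 * Example (illustrative only): the result is either a user space address
 * or -EFAULT encoded in an unsigned long, so it must be checked with
 * IS_ERR_VALUE() rather than compared against NULL.
 *
 *	unsigned long vmaddr;
 *
 *	vmaddr = gmap_translate(gmap, gaddr);
 *	if (IS_ERR_VALUE(vmaddr))
 *		return vmaddr;	// no segment mapped at gaddr
 */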
/**
 * gmap_unlink - disconnect a page table from the gmap shadow tables
 * @mm: pointer to the parent mm_struct
 * @table: pointer to the host page table
 * @vmaddr: vm address associated with the host page table
 */
void gmap_unlink(struct mm_struct *mm, unsigned long *table,
		 unsigned long vmaddr)
{
	struct gmap *gmap;
	int flush;

	rcu_read_lock();
	list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
		flush = __gmap_unlink_by_vmaddr(gmap, vmaddr);
		if (flush)
			gmap_flush_tlb(gmap);
	}
	rcu_read_unlock();
}
/**
 * __gmap_link - set up shadow page tables to connect a host to a guest address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 * @vmaddr: vm address
 *
 * Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
 * if the vm address is already mapped to a different guest segment.
 * The mmap_sem of the mm that belongs to the address space must be held
 * when this function gets called.
 */
int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
{
	struct mm_struct *mm;
	unsigned long *table;
	spinlock_t *ptl;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	int rc;

	BUG_ON(gmap_is_shadow(gmap));
	/* Create higher level tables in the gmap page table */
	table = gmap->table;
	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION1) {
		table += (gaddr & _REGION1_INDEX) >> _REGION1_SHIFT;
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY,
				     gaddr & _REGION1_MASK))
			return -ENOMEM;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	}
	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION2) {
		table += (gaddr & _REGION2_INDEX) >> _REGION2_SHIFT;
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY,
				     gaddr & _REGION2_MASK))
			return -ENOMEM;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	}
	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION3) {
		table += (gaddr & _REGION3_INDEX) >> _REGION3_SHIFT;
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY,
				     gaddr & _REGION3_MASK))
			return -ENOMEM;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	}
	table += (gaddr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
	/* Walk the parent mm page table */
	mm = gmap->mm;
	pgd = pgd_offset(mm, vmaddr);
	VM_BUG_ON(pgd_none(*pgd));
	p4d = p4d_offset(pgd, vmaddr);
	VM_BUG_ON(p4d_none(*p4d));
	pud = pud_offset(p4d, vmaddr);
	VM_BUG_ON(pud_none(*pud));
	/* large puds cannot yet be handled */
	if (pud_large(*pud))
		return -EFAULT;
	pmd = pmd_offset(pud, vmaddr);
	VM_BUG_ON(pmd_none(*pmd));
	/* large pmds cannot yet be handled */
	if (pmd_large(*pmd))
		return -EFAULT;
	/* Link gmap segment table entry location to page table. */
	rc = radix_tree_preload(GFP_KERNEL);
	if (rc)
		return rc;
	ptl = pmd_lock(mm, pmd);
	spin_lock(&gmap->guest_table_lock);
	if (*table == _SEGMENT_ENTRY_EMPTY) {
		rc = radix_tree_insert(&gmap->host_to_guest,
				       vmaddr >> PMD_SHIFT, table);
		if (!rc)
			*table = pmd_val(*pmd);
	} else
		rc = 0;
	spin_unlock(&gmap->guest_table_lock);
	spin_unlock(ptl);
	radix_tree_preload_end();
	return rc;
}
/**
 * gmap_fault - resolve a fault on a guest address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 * @fault_flags: flags to pass down to handle_mm_fault()
 *
 * Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
 * if the vm address is already mapped to a different guest segment.
 */
int gmap_fault(struct gmap *gmap, unsigned long gaddr,
	       unsigned int fault_flags)
{
	unsigned long vmaddr;
	int rc;
	bool unlocked;

	down_read(&gmap->mm->mmap_sem);

retry:
	unlocked = false;
	vmaddr = __gmap_translate(gmap, gaddr);
	if (IS_ERR_VALUE(vmaddr)) {
		rc = vmaddr;
		goto out_up;
	}
	if (fixup_user_fault(current, gmap->mm, vmaddr, fault_flags,
			     &unlocked)) {
		rc = -EFAULT;
		goto out_up;
	}
	/*
	 * In case fixup_user_fault unlocked the mmap_sem during faulting,
	 * redo __gmap_translate to avoid racing with a map/unmap_segment.
	 */
	if (unlocked)
		goto retry;

	rc = __gmap_link(gmap, gaddr, vmaddr);
out_up:
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_fault);
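
/*
 * Usage sketch (illustrative only): resolving a guest write fault. The
 * FAULT_FLAG_WRITE flag is passed down to handle_mm_fault() via
 * fixup_user_fault(); on success the host page table is linked into the
 * gmap segment table.
 *
 *	int rc = gmap_fault(gmap, gaddr, FAULT_FLAG_WRITE);
 *	if (rc)
 *		return rc;	// -EFAULT or -ENOMEM
 */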
/*
 * this function is assumed to be called with mmap_sem held
 */
void __gmap_zap(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long vmaddr;
	spinlock_t *ptl;
	pte_t *ptep;

	/* Find the vm address for the guest address */
	vmaddr = (unsigned long) radix_tree_lookup(&gmap->guest_to_host,
						   gaddr >> PMD_SHIFT);
	if (vmaddr) {
		vmaddr |= gaddr & ~PMD_MASK;
		/* Get pointer to the page table entry */
		ptep = get_locked_pte(gmap->mm, vmaddr, &ptl);
		if (likely(ptep)) {
			ptep_zap_unused(gmap->mm, vmaddr, ptep, 0);
			pte_unmap_unlock(ptep, ptl);
		}
	}
}
EXPORT_SYMBOL_GPL(__gmap_zap);
void gmap_discard(struct gmap *gmap, unsigned long from, unsigned long to)
{
	unsigned long gaddr, vmaddr, size;
	struct vm_area_struct *vma;

	down_read(&gmap->mm->mmap_sem);
	for (gaddr = from; gaddr < to;
	     gaddr = (gaddr + PMD_SIZE) & PMD_MASK) {
		/* Find the vm address for the guest address */
		vmaddr = (unsigned long)
			radix_tree_lookup(&gmap->guest_to_host,
					  gaddr >> PMD_SHIFT);
		if (!vmaddr)
			continue;
		vmaddr |= gaddr & ~PMD_MASK;
		/* Find vma in the parent mm */
		vma = find_vma(gmap->mm, vmaddr);
		if (!vma)
			continue;
		size = min(to - gaddr, PMD_SIZE - (gaddr & ~PMD_MASK));
		zap_page_range(vma, vmaddr, size);
	}
	up_read(&gmap->mm->mmap_sem);
}
EXPORT_SYMBOL_GPL(gmap_discard);
static LIST_HEAD(gmap_notifier_list);
static DEFINE_SPINLOCK(gmap_notifier_lock);

/**
 * gmap_register_pte_notifier - register a pte invalidation callback
 * @nb: pointer to the gmap notifier block
 */
void gmap_register_pte_notifier(struct gmap_notifier *nb)
{
	spin_lock(&gmap_notifier_lock);
	list_add_rcu(&nb->list, &gmap_notifier_list);
	spin_unlock(&gmap_notifier_lock);
}
EXPORT_SYMBOL_GPL(gmap_register_pte_notifier);

/**
 * gmap_unregister_pte_notifier - remove a pte invalidation callback
 * @nb: pointer to the gmap notifier block
 */
void gmap_unregister_pte_notifier(struct gmap_notifier *nb)
{
	spin_lock(&gmap_notifier_lock);
	list_del_rcu(&nb->list);
	spin_unlock(&gmap_notifier_lock);
	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(gmap_unregister_pte_notifier);
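
/*
 * Usage sketch (illustrative only; the callback name is made up): a caller
 * such as KVM registers one notifier block for the lifetime of the module.
 * The callback runs for every invalidation of a pte that has a
 * notification bit set.
 *
 *	static void example_notifier_call(struct gmap *gmap,
 *					  unsigned long start,
 *					  unsigned long end)
 *	{
 *		// react to the invalidated guest range [start, end]
 *	}
 *
 *	static struct gmap_notifier example_notifier = {
 *		.notifier_call = example_notifier_call,
 *	};
 *
 *	gmap_register_pte_notifier(&example_notifier);
 *	...
 *	gmap_unregister_pte_notifier(&example_notifier);
 */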
/**
 * gmap_call_notifier - call all registered invalidation callbacks
 * @gmap: pointer to guest mapping meta data structure
 * @start: start virtual address in the guest address space
 * @end: end virtual address in the guest address space
 */
static void gmap_call_notifier(struct gmap *gmap, unsigned long start,
			       unsigned long end)
{
	struct gmap_notifier *nb;

	list_for_each_entry(nb, &gmap_notifier_list, list)
		nb->notifier_call(gmap, start, end);
}
/**
 * gmap_table_walk - walk the gmap page tables
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @level: page table level to stop at
 *
 * Returns a table entry pointer for the given guest address and @level
 * @level=0 : returns a pointer to a page table entry (or NULL)
 * @level=1 : returns a pointer to a segment table entry (or NULL)
 * @level=2 : returns a pointer to a region-3 table entry (or NULL)
 * @level=3 : returns a pointer to a region-2 table entry (or NULL)
 * @level=4 : returns a pointer to a region-1 table entry (or NULL)
 *
 * Returns NULL if the gmap page tables could not be walked to the
 * requested level.
 *
 * Note: Can also be called for shadow gmaps.
 */
static inline unsigned long *gmap_table_walk(struct gmap *gmap,
					     unsigned long gaddr, int level)
{
	unsigned long *table;

	if ((gmap->asce & _ASCE_TYPE_MASK) + 4 < (level * 4))
		return NULL;
	if (gmap_is_shadow(gmap) && gmap->removed)
		return NULL;
	if (gaddr & (-1UL << (31 + ((gmap->asce & _ASCE_TYPE_MASK) >> 2) * 11)))
		return NULL;
	table = gmap->table;
	switch (gmap->asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_REGION1:
		table += (gaddr & _REGION1_INDEX) >> _REGION1_SHIFT;
		if (level == 4)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* Fallthrough */
	case _ASCE_TYPE_REGION2:
		table += (gaddr & _REGION2_INDEX) >> _REGION2_SHIFT;
		if (level == 3)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* Fallthrough */
	case _ASCE_TYPE_REGION3:
		table += (gaddr & _REGION3_INDEX) >> _REGION3_SHIFT;
		if (level == 2)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* Fallthrough */
	case _ASCE_TYPE_SEGMENT:
		table += (gaddr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
		if (level == 1)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
		table += (gaddr & _PAGE_INDEX) >> _PAGE_SHIFT;
	}
	return table;
}
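
/*
 * Illustration (not a new API, just how callers in this file use @level):
 * the walk stops at the requested table, so a level-1 walk yields a
 * segment table entry and a level-0 walk a page table entry.
 *
 *	unsigned long *ste = gmap_table_walk(gmap, gaddr, 1);
 *	unsigned long *pte = gmap_table_walk(gmap, gaddr, 0);
 */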
/**
 * gmap_pte_op_walk - walk the gmap page table, get the page table lock
 *		      and return the pte pointer
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @ptl: pointer to the spinlock pointer
 *
 * Returns a pointer to the locked pte for a guest address, or NULL
 */
static pte_t *gmap_pte_op_walk(struct gmap *gmap, unsigned long gaddr,
			       spinlock_t **ptl)
{
	unsigned long *table;

	BUG_ON(gmap_is_shadow(gmap));
	/* Walk the gmap page table, lock and get pte pointer */
	table = gmap_table_walk(gmap, gaddr, 1); /* get segment pointer */
	if (!table || *table & _SEGMENT_ENTRY_INVALID)
		return NULL;
	return pte_alloc_map_lock(gmap->mm, (pmd_t *) table, gaddr, ptl);
}
/**
 * gmap_pte_op_fixup - force a page in and connect the gmap page table
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @vmaddr: address in the host process address space
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 *
 * Returns 0 if the caller can retry __gmap_translate (might fail again),
 * -ENOMEM if out of memory and -EFAULT if anything goes wrong while fixing
 * up or connecting the gmap page table.
 */
static int gmap_pte_op_fixup(struct gmap *gmap, unsigned long gaddr,
			     unsigned long vmaddr, int prot)
{
	struct mm_struct *mm = gmap->mm;
	unsigned int fault_flags;
	bool unlocked = false;

	BUG_ON(gmap_is_shadow(gmap));
	fault_flags = (prot == PROT_WRITE) ? FAULT_FLAG_WRITE : 0;
	if (fixup_user_fault(current, mm, vmaddr, fault_flags, &unlocked))
		return -EFAULT;
	if (unlocked)
		/* lost mmap_sem, caller has to retry __gmap_translate */
		return 0;
	/* Connect the page tables */
	return __gmap_link(gmap, gaddr, vmaddr);
}
/**
 * gmap_pte_op_end - release the page table lock
 * @ptl: pointer to the spinlock pointer
 */
static void gmap_pte_op_end(spinlock_t *ptl)
{
	spin_unlock(ptl);
}
/**
 * gmap_protect_range - remove access rights to memory and set pgste bits
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @len: size of area
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 * @bits: pgste notification bits to set
 *
 * Returns 0 if successfully protected, -ENOMEM if out of memory and
 * -EFAULT if gaddr is invalid (or mapping for shadows is missing).
 *
 * Called with sg->mm->mmap_sem in read.
 */
static int gmap_protect_range(struct gmap *gmap, unsigned long gaddr,
			      unsigned long len, int prot, unsigned long bits)
{
	unsigned long vmaddr;
	spinlock_t *ptl;
	pte_t *ptep;
	int rc;

	BUG_ON(gmap_is_shadow(gmap));
	while (len) {
		rc = -EAGAIN;
		ptep = gmap_pte_op_walk(gmap, gaddr, &ptl);
		if (ptep) {
			rc = ptep_force_prot(gmap->mm, gaddr, ptep, prot, bits);
			gmap_pte_op_end(ptl);
		}
		if (rc) {
			vmaddr = __gmap_translate(gmap, gaddr);
			if (IS_ERR_VALUE(vmaddr))
				return vmaddr;
			rc = gmap_pte_op_fixup(gmap, gaddr, vmaddr, prot);
			if (rc)
				return rc;
			continue;
		}
		gaddr += PAGE_SIZE;
		len -= PAGE_SIZE;
	}
	return 0;
}
/**
 * gmap_mprotect_notify - change access rights for a range of ptes and
 *			  call the notifier if any pte changes again
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @len: size of area
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 *
 * Returns 0 if for each page in the given range a gmap mapping exists,
 * the new access rights could be set and the notifier could be armed.
 * If the gmap mapping is missing for one or more pages -EFAULT is
 * returned. If no memory could be allocated -ENOMEM is returned.
 * This function establishes missing page table entries.
 */
int gmap_mprotect_notify(struct gmap *gmap, unsigned long gaddr,
			 unsigned long len, int prot)
{
	int rc;

	if ((gaddr & ~PAGE_MASK) || (len & ~PAGE_MASK) || gmap_is_shadow(gmap))
		return -EINVAL;
	if (!MACHINE_HAS_ESOP && prot == PROT_READ)
		return -EINVAL;
	down_read(&gmap->mm->mmap_sem);
	rc = gmap_protect_range(gmap, gaddr, len, prot, PGSTE_IN_BIT);
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_mprotect_notify);
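
/*
 * Usage sketch (illustrative only): write-protecting a single guest page
 * and arming the invalidation notifier for it, so a later write triggers
 * the registered callbacks before the protection is lifted.
 *
 *	rc = gmap_mprotect_notify(gmap, gaddr & PAGE_MASK, PAGE_SIZE,
 *				  PROT_READ);
 *	if (rc)
 *		return rc;	// -EFAULT, -ENOMEM or -EINVAL
 */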
/**
 * gmap_read_table - get an unsigned long value from a guest page table using
 *		     absolute addressing, without marking the page referenced.
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @val: pointer to the unsigned long value to return
 *
 * Returns 0 if the value was read, -ENOMEM if out of memory and -EFAULT
 * if reading using the virtual address failed. -EINVAL if called on a gmap
 * shadow.
 *
 * Called with gmap->mm->mmap_sem in read.
 */
int gmap_read_table(struct gmap *gmap, unsigned long gaddr, unsigned long *val)
{
	unsigned long address, vmaddr;
	spinlock_t *ptl;
	pte_t *ptep, pte;
	int rc;

	if (gmap_is_shadow(gmap))
		return -EINVAL;

	while (1) {
		rc = -EAGAIN;
		ptep = gmap_pte_op_walk(gmap, gaddr, &ptl);
		if (ptep) {
			pte = *ptep;
			if (pte_present(pte) && (pte_val(pte) & _PAGE_READ)) {
				address = pte_val(pte) & PAGE_MASK;
				address += gaddr & ~PAGE_MASK;
				*val = *(unsigned long *) address;
				pte_val(*ptep) |= _PAGE_YOUNG;
				/* Do *NOT* clear the _PAGE_INVALID bit! */
				rc = 0;
			}
			gmap_pte_op_end(ptl);
		}
		if (!rc)
			break;
		vmaddr = __gmap_translate(gmap, gaddr);
		if (IS_ERR_VALUE(vmaddr)) {
			rc = vmaddr;
			break;
		}
		rc = gmap_pte_op_fixup(gmap, gaddr, vmaddr, PROT_READ);
		if (rc)
			break;
	}
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_read_table);
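
/*
 * Example (illustrative only): reading one aligned quadword from guest
 * memory without marking the backing page referenced, e.g. when a caller
 * wants to peek at guest tables during emulation.
 *
 *	unsigned long val;
 *
 *	rc = gmap_read_table(gmap, gaddr & ~7UL, &val);
 *	if (rc)
 *		return rc;	// -EFAULT, -ENOMEM or -EINVAL
 */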
/**
 * gmap_insert_rmap - add a rmap to the host_to_rmap radix tree
 * @sg: pointer to the shadow guest address space structure
 * @vmaddr: vm address associated with the rmap
 * @rmap: pointer to the rmap structure
 *
 * Called with the sg->guest_table_lock
 */
static inline void gmap_insert_rmap(struct gmap *sg, unsigned long vmaddr,
				    struct gmap_rmap *rmap)
{
	void __rcu **slot;

	BUG_ON(!gmap_is_shadow(sg));
	slot = radix_tree_lookup_slot(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT);
	if (slot) {
		rmap->next = radix_tree_deref_slot_protected(slot,
							&sg->guest_table_lock);
		radix_tree_replace_slot(&sg->host_to_rmap, slot, rmap);
	} else {
		rmap->next = NULL;
		radix_tree_insert(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT,
				  rmap);
	}
}
/**
 * gmap_protect_rmap - restrict access rights to memory (RO) and create an rmap
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow gmap
 * @paddr: address in the parent guest address space
 * @len: length of the memory area to protect
 *
 * Returns 0 if successfully protected and the rmap was created, -ENOMEM
 * if out of memory and -EFAULT if paddr is invalid.
 */
static int gmap_protect_rmap(struct gmap *sg, unsigned long raddr,
			     unsigned long paddr, unsigned long len)
{
	struct gmap *parent;
	struct gmap_rmap *rmap;
	unsigned long vmaddr;
	spinlock_t *ptl;
	pte_t *ptep;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	parent = sg->parent;
	while (len) {
		vmaddr = __gmap_translate(parent, paddr);
		if (IS_ERR_VALUE(vmaddr))
			return vmaddr;
		rmap = kzalloc(sizeof(*rmap), GFP_KERNEL);
		if (!rmap)
			return -ENOMEM;
		rmap->raddr = raddr;
		rc = radix_tree_preload(GFP_KERNEL);
		if (rc) {
			kfree(rmap);
			return rc;
		}
		rc = -EAGAIN;
		ptep = gmap_pte_op_walk(parent, paddr, &ptl);
		if (ptep) {
			spin_lock(&sg->guest_table_lock);
			rc = ptep_force_prot(parent->mm, paddr, ptep, PROT_READ,
					     PGSTE_VSIE_BIT);
			if (!rc)
				gmap_insert_rmap(sg, vmaddr, rmap);
			spin_unlock(&sg->guest_table_lock);
			gmap_pte_op_end(ptl);
		}
		radix_tree_preload_end();
		if (rc) {
			kfree(rmap);
			rc = gmap_pte_op_fixup(parent, paddr, vmaddr, PROT_READ);
			if (rc)
				return rc;
			continue;
		}
		paddr += PAGE_SIZE;
		len -= PAGE_SIZE;
	}
	return 0;
}
#define _SHADOW_RMAP_MASK	0x7
#define _SHADOW_RMAP_REGION1	0x5
#define _SHADOW_RMAP_REGION2	0x4
#define _SHADOW_RMAP_REGION3	0x3
#define _SHADOW_RMAP_SEGMENT	0x2
#define _SHADOW_RMAP_PGTABLE	0x1
/**
 * gmap_idte_one - invalidate a single region or segment table entry
 * @asce: region or segment table *origin* + table-type bits
 * @vaddr: virtual address to identify the table entry to flush
 *
 * The invalid bit of a single region or segment table entry is set
 * and the associated TLB entries depending on the entry are flushed.
 * The table-type of the @asce identifies the portion of the @vaddr
 * that is used as the invalidation index.
 */
static inline void gmap_idte_one(unsigned long asce, unsigned long vaddr)
{
	asm volatile(
		"	.insn	rrf,0xb98e0000,%0,%1,0,0"
		: : "a" (asce), "a" (vaddr) : "cc", "memory");
}
/**
 * gmap_unshadow_page - remove a page from a shadow page table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_page(struct gmap *sg, unsigned long raddr)
{
	unsigned long *table;

	BUG_ON(!gmap_is_shadow(sg));
	table = gmap_table_walk(sg, raddr, 0); /* get page table pointer */
	if (!table || *table & _PAGE_INVALID)
		return;
	gmap_call_notifier(sg, raddr, raddr + _PAGE_SIZE - 1);
	ptep_unshadow_pte(sg->mm, raddr, (pte_t *) table);
}
/**
 * __gmap_unshadow_pgt - remove all entries from a shadow page table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 * @pgt: pointer to the start of a shadow page table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_pgt(struct gmap *sg, unsigned long raddr,
				unsigned long *pgt)
{
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	for (i = 0; i < _PAGE_ENTRIES; i++, raddr += _PAGE_SIZE)
		pgt[i] = _PAGE_INVALID;
}
/**
 * gmap_unshadow_pgt - remove a shadow page table from a segment entry
 * @sg: pointer to the shadow guest address space structure
 * @raddr: address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_pgt(struct gmap *sg, unsigned long raddr)
{
	unsigned long sto, *ste, *pgt;
	struct page *page;

	BUG_ON(!gmap_is_shadow(sg));
	ste = gmap_table_walk(sg, raddr, 1); /* get segment pointer */
	if (!ste || !(*ste & _SEGMENT_ENTRY_ORIGIN))
		return;
	gmap_call_notifier(sg, raddr, raddr + _SEGMENT_SIZE - 1);
	sto = (unsigned long) (ste - ((raddr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT));
	gmap_idte_one(sto | _ASCE_TYPE_SEGMENT, raddr);
	pgt = (unsigned long *)(*ste & _SEGMENT_ENTRY_ORIGIN);
	*ste = _SEGMENT_ENTRY_EMPTY;
	__gmap_unshadow_pgt(sg, raddr, pgt);
	/* Free page table */
	page = pfn_to_page(__pa(pgt) >> PAGE_SHIFT);
	list_del(&page->lru);
	page_table_free_pgste(page);
}
/**
 * __gmap_unshadow_sgt - remove all entries from a shadow segment table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 * @sgt: pointer to the start of a shadow segment table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr,
				unsigned long *sgt)
{
	unsigned long *pgt;
	struct page *page;
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	for (i = 0; i < _CRST_ENTRIES; i++, raddr += _SEGMENT_SIZE) {
		if (!(sgt[i] & _SEGMENT_ENTRY_ORIGIN))
			continue;
		pgt = (unsigned long *)(sgt[i] & _REGION_ENTRY_ORIGIN);
		sgt[i] = _SEGMENT_ENTRY_EMPTY;
		__gmap_unshadow_pgt(sg, raddr, pgt);
		/* Free page table */
		page = pfn_to_page(__pa(pgt) >> PAGE_SHIFT);
		list_del(&page->lru);
		page_table_free_pgste(page);
	}
}
/**
 * gmap_unshadow_sgt - remove a shadow segment table from a region-3 entry
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 *
 * Called with the shadow->guest_table_lock
 */
static void gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr)
{
	unsigned long r3o, *r3e, *sgt;
	struct page *page;

	BUG_ON(!gmap_is_shadow(sg));
	r3e = gmap_table_walk(sg, raddr, 2); /* get region-3 pointer */
	if (!r3e || !(*r3e & _REGION_ENTRY_ORIGIN))
		return;
	gmap_call_notifier(sg, raddr, raddr + _REGION3_SIZE - 1);
	r3o = (unsigned long) (r3e - ((raddr & _REGION3_INDEX) >> _REGION3_SHIFT));
	gmap_idte_one(r3o | _ASCE_TYPE_REGION3, raddr);
	sgt = (unsigned long *)(*r3e & _REGION_ENTRY_ORIGIN);
	*r3e = _REGION3_ENTRY_EMPTY;
	__gmap_unshadow_sgt(sg, raddr, sgt);
	/* Free segment table */
	page = pfn_to_page(__pa(sgt) >> PAGE_SHIFT);
	list_del(&page->lru);
	__free_pages(page, CRST_ALLOC_ORDER);
}
/**
 * __gmap_unshadow_r3t - remove all entries from a shadow region-3 table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: address in the shadow guest address space
 * @r3t: pointer to the start of a shadow region-3 table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr,
				unsigned long *r3t)
{
	unsigned long *sgt;
	struct page *page;
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	for (i = 0; i < _CRST_ENTRIES; i++, raddr += _REGION3_SIZE) {
		if (!(r3t[i] & _REGION_ENTRY_ORIGIN))
			continue;
		sgt = (unsigned long *)(r3t[i] & _REGION_ENTRY_ORIGIN);
		r3t[i] = _REGION3_ENTRY_EMPTY;
		__gmap_unshadow_sgt(sg, raddr, sgt);
		/* Free segment table */
		page = pfn_to_page(__pa(sgt) >> PAGE_SHIFT);
		list_del(&page->lru);
		__free_pages(page, CRST_ALLOC_ORDER);
	}
}
/**
 * gmap_unshadow_r3t - remove a shadow region-3 table from a region-2 entry
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr)
{
	unsigned long r2o, *r2e, *r3t;
	struct page *page;

	BUG_ON(!gmap_is_shadow(sg));
	r2e = gmap_table_walk(sg, raddr, 3); /* get region-2 pointer */
	if (!r2e || !(*r2e & _REGION_ENTRY_ORIGIN))
		return;
	gmap_call_notifier(sg, raddr, raddr + _REGION2_SIZE - 1);
	r2o = (unsigned long) (r2e - ((raddr & _REGION2_INDEX) >> _REGION2_SHIFT));
	gmap_idte_one(r2o | _ASCE_TYPE_REGION2, raddr);
	r3t = (unsigned long *)(*r2e & _REGION_ENTRY_ORIGIN);
	*r2e = _REGION2_ENTRY_EMPTY;
	__gmap_unshadow_r3t(sg, raddr, r3t);
	/* Free region 3 table */
	page = pfn_to_page(__pa(r3t) >> PAGE_SHIFT);
	list_del(&page->lru);
	__free_pages(page, CRST_ALLOC_ORDER);
}
/**
 * __gmap_unshadow_r2t - remove all entries from a shadow region-2 table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 * @r2t: pointer to the start of a shadow region-2 table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr,
				unsigned long *r2t)
{
	unsigned long *r3t;
	struct page *page;
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	for (i = 0; i < _CRST_ENTRIES; i++, raddr += _REGION2_SIZE) {
		if (!(r2t[i] & _REGION_ENTRY_ORIGIN))
			continue;
		r3t = (unsigned long *)(r2t[i] & _REGION_ENTRY_ORIGIN);
		r2t[i] = _REGION2_ENTRY_EMPTY;
		__gmap_unshadow_r3t(sg, raddr, r3t);
		/* Free region 3 table */
		page = pfn_to_page(__pa(r3t) >> PAGE_SHIFT);
		list_del(&page->lru);
		__free_pages(page, CRST_ALLOC_ORDER);
	}
}
/**
 * gmap_unshadow_r2t - remove a shadow region-2 table from a region-1 entry
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr)
{
	unsigned long r1o, *r1e, *r2t;
	struct page *page;

	BUG_ON(!gmap_is_shadow(sg));
	r1e = gmap_table_walk(sg, raddr, 4); /* get region-1 pointer */
	if (!r1e || !(*r1e & _REGION_ENTRY_ORIGIN))
		return;
	gmap_call_notifier(sg, raddr, raddr + _REGION1_SIZE - 1);
	r1o = (unsigned long) (r1e - ((raddr & _REGION1_INDEX) >> _REGION1_SHIFT));
	gmap_idte_one(r1o | _ASCE_TYPE_REGION1, raddr);
	r2t = (unsigned long *)(*r1e & _REGION_ENTRY_ORIGIN);
	*r1e = _REGION1_ENTRY_EMPTY;
	__gmap_unshadow_r2t(sg, raddr, r2t);
	/* Free region 2 table */
	page = pfn_to_page(__pa(r2t) >> PAGE_SHIFT);
	list_del(&page->lru);
	__free_pages(page, CRST_ALLOC_ORDER);
}
/**
 * __gmap_unshadow_r1t - remove all entries from a shadow region-1 table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 * @r1t: pointer to the start of a shadow region-1 table
 *
 * Called with the shadow->guest_table_lock
 */
static void __gmap_unshadow_r1t(struct gmap *sg, unsigned long raddr,
				unsigned long *r1t)
{
	unsigned long asce, *r2t;
	struct page *page;
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	asce = (unsigned long) r1t | _ASCE_TYPE_REGION1;
	for (i = 0; i < _CRST_ENTRIES; i++, raddr += _REGION1_SIZE) {
		if (!(r1t[i] & _REGION_ENTRY_ORIGIN))
			continue;
		r2t = (unsigned long *)(r1t[i] & _REGION_ENTRY_ORIGIN);
		__gmap_unshadow_r2t(sg, raddr, r2t);
		/* Clear entry and flush translation r1t -> r2t */
		gmap_idte_one(asce, raddr);
		r1t[i] = _REGION1_ENTRY_EMPTY;
		/* Free region 2 table */
		page = pfn_to_page(__pa(r2t) >> PAGE_SHIFT);
		list_del(&page->lru);
		__free_pages(page, CRST_ALLOC_ORDER);
	}
}
/**
 * gmap_unshadow - remove a shadow page table completely
 * @sg: pointer to the shadow guest address space structure
 *
 * Called with sg->guest_table_lock
 */
static void gmap_unshadow(struct gmap *sg)
{
	unsigned long *table;

	BUG_ON(!gmap_is_shadow(sg));
	if (sg->removed)
		return;
	sg->removed = 1;
	gmap_call_notifier(sg, 0, -1UL);
	gmap_flush_tlb(sg);
	table = (unsigned long *)(sg->asce & _ASCE_ORIGIN);
	switch (sg->asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_REGION1:
		__gmap_unshadow_r1t(sg, 0, table);
		break;
	case _ASCE_TYPE_REGION2:
		__gmap_unshadow_r2t(sg, 0, table);
		break;
	case _ASCE_TYPE_REGION3:
		__gmap_unshadow_r3t(sg, 0, table);
		break;
	case _ASCE_TYPE_SEGMENT:
		__gmap_unshadow_sgt(sg, 0, table);
		break;
	}
}
/**
 * gmap_find_shadow - find a specific asce in the list of shadow tables
 * @parent: pointer to the parent gmap
 * @asce: ASCE for which the shadow table is created
 * @edat_level: edat level to be used for the shadow translation
 *
 * Returns the pointer to a gmap if a shadow table with the given asce is
 * already available, ERR_PTR(-EAGAIN) if another one is just being created,
 * otherwise NULL
 */
static struct gmap *gmap_find_shadow(struct gmap *parent, unsigned long asce,
				     int edat_level)
{
	struct gmap *sg;

	list_for_each_entry(sg, &parent->children, list) {
		if (sg->orig_asce != asce || sg->edat_level != edat_level ||
		    sg->removed)
			continue;
		if (!sg->initialized)
			return ERR_PTR(-EAGAIN);
		atomic_inc(&sg->ref_count);
		return sg;
	}
	return NULL;
}
/**
 * gmap_shadow_valid - check if a shadow guest address space matches the
 *		       given properties and is still valid
 * @sg: pointer to the shadow guest address space structure
 * @asce: ASCE for which the shadow table is requested
 * @edat_level: edat level to be used for the shadow translation
 *
 * Returns 1 if the gmap shadow is still valid and matches the given
 * properties, the caller can continue using it. Returns 0 otherwise, the
 * caller has to request a new shadow gmap in this case.
 */
int gmap_shadow_valid(struct gmap *sg, unsigned long asce, int edat_level)
{
	if (sg->removed)
		return 0;
	return sg->orig_asce == asce && sg->edat_level == edat_level;
}
EXPORT_SYMBOL_GPL(gmap_shadow_valid);
/**
 * gmap_shadow - create/find a shadow guest address space
 * @parent: pointer to the parent gmap
 * @asce: ASCE for which the shadow table is created
 * @edat_level: edat level to be used for the shadow translation
 *
 * The pages of the top level page table referred to by the asce parameter
 * will be set to read-only and marked in the PGSTEs of the kvm process.
 * The shadow table will be removed automatically on any change to the
 * PTE mapping for the source table.
 *
 * Returns a guest address space structure, ERR_PTR(-ENOMEM) if out of memory,
 * ERR_PTR(-EAGAIN) if the caller has to retry and ERR_PTR(-EFAULT) if the
 * parent gmap table could not be protected.
 */
struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce,
			 int edat_level)
{
	struct gmap *sg, *new;
	unsigned long limit;
	int rc;

	BUG_ON(gmap_is_shadow(parent));
	spin_lock(&parent->shadow_lock);
	sg = gmap_find_shadow(parent, asce, edat_level);
	spin_unlock(&parent->shadow_lock);
	if (sg)
		return sg;
	/* Create a new shadow gmap */
	limit = -1UL >> (33 - (((asce & _ASCE_TYPE_MASK) >> 2) * 11));
	if (asce & _ASCE_REAL_SPACE)
		limit = -1UL;
	new = gmap_alloc(limit);
	if (!new)
		return ERR_PTR(-ENOMEM);
	new->mm = parent->mm;
	new->parent = gmap_get(parent);
	new->orig_asce = asce;
	new->edat_level = edat_level;
	new->initialized = false;
	spin_lock(&parent->shadow_lock);
	/* Recheck if another CPU created the same shadow */
	sg = gmap_find_shadow(parent, asce, edat_level);
	if (sg) {
		spin_unlock(&parent->shadow_lock);
		gmap_free(new);
		return sg;
	}
	if (asce & _ASCE_REAL_SPACE) {
		/* only allow one real-space gmap shadow */
		list_for_each_entry(sg, &parent->children, list) {
			if (sg->orig_asce & _ASCE_REAL_SPACE) {
				spin_lock(&sg->guest_table_lock);
				gmap_unshadow(sg);
				spin_unlock(&sg->guest_table_lock);
				list_del(&sg->list);
				gmap_put(sg);
				break;
			}
		}
	}
	atomic_set(&new->ref_count, 2);
	list_add(&new->list, &parent->children);
	if (asce & _ASCE_REAL_SPACE) {
		/* nothing to protect, return right away */
		new->initialized = true;
		spin_unlock(&parent->shadow_lock);
		return new;
	}
	spin_unlock(&parent->shadow_lock);
	/* protect after insertion, so it will get properly invalidated */
	down_read(&parent->mm->mmap_sem);
	rc = gmap_protect_range(parent, asce & _ASCE_ORIGIN,
				((asce & _ASCE_TABLE_LENGTH) + 1) * PAGE_SIZE,
				PROT_READ, PGSTE_VSIE_BIT);
	up_read(&parent->mm->mmap_sem);
	spin_lock(&parent->shadow_lock);
	new->initialized = true;
	if (rc) {
		list_del(&new->list);
		gmap_free(new);
		new = ERR_PTR(rc);
	}
	spin_unlock(&parent->shadow_lock);
	return new;
}
EXPORT_SYMBOL_GPL(gmap_shadow);
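
/*
 * Usage sketch (illustrative only; parent_gmap and guest_asce are made-up
 * names): obtaining a shadow gmap for a nested guest. The asce value would
 * come from the nested hypervisor's control block, edat_level from its
 * reported facilities. Note that the result is ERR_PTR-encoded.
 *
 *	struct gmap *sg;
 *
 *	sg = gmap_shadow(parent_gmap, guest_asce, 0);
 *	if (IS_ERR(sg))
 *		return PTR_ERR(sg);	// -ENOMEM, -EAGAIN or -EFAULT
 *	...
 *	gmap_put(sg);	// drop the reference taken by gmap_shadow
 */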
/**
 * gmap_shadow_r2t - create an empty shadow region 2 table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @r2t: parent gmap address of the region 2 table to get shadowed
 * @fake: r2t references contiguous guest memory block, not a r2t
 *
 * The r2t parameter specifies the address of the source table. The
 * four pages of the source table are made read-only in the parent gmap
 * address space. A write to the source table area @r2t will automatically
 * remove the shadow r2 table and all of its descendants.
 *
 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t,
		    int fake)
{
	unsigned long raddr, origin, offset, len;
	unsigned long *s_r2t, *table;
	struct page *page;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	/* Allocate a shadow region second table */
	page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
	if (!page)
		return -ENOMEM;
	page->index = r2t & _REGION_ENTRY_ORIGIN;
	if (fake)
		page->index |= GMAP_SHADOW_FAKE_TABLE;
	s_r2t = (unsigned long *) page_to_phys(page);
	/* Install shadow region second table */
	spin_lock(&sg->guest_table_lock);
	table = gmap_table_walk(sg, saddr, 4); /* get region-1 pointer */
	if (!table) {
		rc = -EAGAIN;		/* Race with unshadow */
		goto out_free;
	}
	if (!(*table & _REGION_ENTRY_INVALID)) {
		rc = 0;			/* Already established */
		goto out_free;
	} else if (*table & _REGION_ENTRY_ORIGIN) {
		rc = -EAGAIN;		/* Race with shadow */
		goto out_free;
	}
	crst_table_init(s_r2t, _REGION2_ENTRY_EMPTY);
	/* mark as invalid as long as the parent table is not protected */
	*table = (unsigned long) s_r2t | _REGION_ENTRY_LENGTH |
		 _REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID;
	if (sg->edat_level >= 1)
		*table |= (r2t & _REGION_ENTRY_PROTECT);
	list_add(&page->lru, &sg->crst_list);
	if (fake) {
		/* nothing to protect for fake tables */
		*table &= ~_REGION_ENTRY_INVALID;
		spin_unlock(&sg->guest_table_lock);
		return 0;
	}
	spin_unlock(&sg->guest_table_lock);
	/* Make r2t read-only in parent gmap page table */
	raddr = (saddr & _REGION1_MASK) | _SHADOW_RMAP_REGION1;
	origin = r2t & _REGION_ENTRY_ORIGIN;
	offset = ((r2t & _REGION_ENTRY_OFFSET) >> 6) * PAGE_SIZE;
	len = ((r2t & _REGION_ENTRY_LENGTH) + 1) * PAGE_SIZE - offset;
	rc = gmap_protect_rmap(sg, raddr, origin + offset, len);
	spin_lock(&sg->guest_table_lock);
	if (!rc) {
		table = gmap_table_walk(sg, saddr, 4);
		if (!table || (*table & _REGION_ENTRY_ORIGIN) !=
			      (unsigned long) s_r2t)
			rc = -EAGAIN;	/* Race with unshadow */
		else
			*table &= ~_REGION_ENTRY_INVALID;
	} else {
		gmap_unshadow_r2t(sg, raddr);
	}
	spin_unlock(&sg->guest_table_lock);
	return rc;
out_free:
	spin_unlock(&sg->guest_table_lock);
	__free_pages(page, CRST_ALLOC_ORDER);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_r2t);
/**
 * gmap_shadow_r3t - create a shadow region 3 table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @r3t: parent gmap address of the region 3 table to get shadowed
 * @fake: r3t references contiguous guest memory block, not a r3t
 *
 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t,
		    int fake)
{
	unsigned long raddr, origin, offset, len;
	unsigned long *s_r3t, *table;
	struct page *page;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	/* Allocate a shadow region third table */
	page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
	if (!page)
		return -ENOMEM;
	page->index = r3t & _REGION_ENTRY_ORIGIN;
	if (fake)
		page->index |= GMAP_SHADOW_FAKE_TABLE;
	s_r3t = (unsigned long *) page_to_phys(page);
	/* Install shadow region third table */
	spin_lock(&sg->guest_table_lock);
	table = gmap_table_walk(sg, saddr, 3); /* get region-2 pointer */
	if (!table) {
		rc = -EAGAIN;		/* Race with unshadow */
		goto out_free;
	}
	if (!(*table & _REGION_ENTRY_INVALID)) {
		rc = 0;			/* Already established */
		goto out_free;
	} else if (*table & _REGION_ENTRY_ORIGIN) {
		rc = -EAGAIN;		/* Race with shadow */
		goto out_free;
	}
	crst_table_init(s_r3t, _REGION3_ENTRY_EMPTY);
	/* mark as invalid as long as the parent table is not protected */
	*table = (unsigned long) s_r3t | _REGION_ENTRY_LENGTH |
		 _REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID;
	if (sg->edat_level >= 1)
		*table |= (r3t & _REGION_ENTRY_PROTECT);
	list_add(&page->lru, &sg->crst_list);
	if (fake) {
		/* nothing to protect for fake tables */
		*table &= ~_REGION_ENTRY_INVALID;
		spin_unlock(&sg->guest_table_lock);
		return 0;
	}
	spin_unlock(&sg->guest_table_lock);
	/* Make r3t read-only in parent gmap page table */
	raddr = (saddr & _REGION2_MASK) | _SHADOW_RMAP_REGION2;
	origin = r3t & _REGION_ENTRY_ORIGIN;
	offset = ((r3t & _REGION_ENTRY_OFFSET) >> 6) * PAGE_SIZE;
	len = ((r3t & _REGION_ENTRY_LENGTH) + 1) * PAGE_SIZE - offset;
	rc = gmap_protect_rmap(sg, raddr, origin + offset, len);
	spin_lock(&sg->guest_table_lock);
	if (!rc) {
		table = gmap_table_walk(sg, saddr, 3);
		if (!table || (*table & _REGION_ENTRY_ORIGIN) !=
			      (unsigned long) s_r3t)
			rc = -EAGAIN;	/* Race with unshadow */
		else
			*table &= ~_REGION_ENTRY_INVALID;
	} else {
		gmap_unshadow_r3t(sg, raddr);
	}
	spin_unlock(&sg->guest_table_lock);
	return rc;
out_free:
	spin_unlock(&sg->guest_table_lock);
	__free_pages(page, CRST_ALLOC_ORDER);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_r3t);
/**
 * gmap_shadow_sgt - create a shadow segment table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @sgt: parent gmap address of the segment table to get shadowed
 * @fake: sgt references contiguous guest memory block, not a sgt
 *
 * Returns: 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt,
		    int fake)
{
	unsigned long raddr, origin, offset, len;
	unsigned long *s_sgt, *table;
	struct page *page;
	int rc;

	BUG_ON(!gmap_is_shadow(sg) || (sgt & _REGION3_ENTRY_LARGE));
	/* Allocate a shadow segment table */
	page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
	if (!page)
		return -ENOMEM;
	page->index = sgt & _REGION_ENTRY_ORIGIN;
	if (fake)
		page->index |= GMAP_SHADOW_FAKE_TABLE;
	s_sgt = (unsigned long *) page_to_phys(page);
	/* Install shadow segment table */
	spin_lock(&sg->guest_table_lock);
	table = gmap_table_walk(sg, saddr, 2); /* get region-3 pointer */
	if (!table) {
		rc = -EAGAIN;		/* Race with unshadow */
		goto out_free;
	}
	if (!(*table & _REGION_ENTRY_INVALID)) {
		rc = 0;			/* Already established */
		goto out_free;
	} else if (*table & _REGION_ENTRY_ORIGIN) {
		rc = -EAGAIN;		/* Race with shadow */
		goto out_free;
	}
	crst_table_init(s_sgt, _SEGMENT_ENTRY_EMPTY);
	/* mark as invalid as long as the parent table is not protected */
	*table = (unsigned long) s_sgt | _REGION_ENTRY_LENGTH |
		 _REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID;
	if (sg->edat_level >= 1)
		*table |= sgt & _REGION_ENTRY_PROTECT;
	list_add(&page->lru, &sg->crst_list);
	if (fake) {
		/* nothing to protect for fake tables */
		*table &= ~_REGION_ENTRY_INVALID;
		spin_unlock(&sg->guest_table_lock);
		return 0;
	}
	spin_unlock(&sg->guest_table_lock);
	/* Make sgt read-only in parent gmap page table */
	raddr = (saddr & _REGION3_MASK) | _SHADOW_RMAP_REGION3;
	origin = sgt & _REGION_ENTRY_ORIGIN;
	offset = ((sgt & _REGION_ENTRY_OFFSET) >> 6) * PAGE_SIZE;
	len = ((sgt & _REGION_ENTRY_LENGTH) + 1) * PAGE_SIZE - offset;
	rc = gmap_protect_rmap(sg, raddr, origin + offset, len);
	spin_lock(&sg->guest_table_lock);
	if (!rc) {
		table = gmap_table_walk(sg, saddr, 2);
		if (!table || (*table & _REGION_ENTRY_ORIGIN) !=
			      (unsigned long) s_sgt)
			rc = -EAGAIN;	/* Race with unshadow */
		else
			*table &= ~_REGION_ENTRY_INVALID;
	} else {
		gmap_unshadow_sgt(sg, raddr);
	}
	spin_unlock(&sg->guest_table_lock);
	return rc;
out_free:
	spin_unlock(&sg->guest_table_lock);
	__free_pages(page, CRST_ALLOC_ORDER);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_sgt);
/**
 * gmap_shadow_pgt_lookup - find a shadow page table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: the address in the shadow guest address space
 * @pgt: parent gmap address of the page table to get shadowed
 * @dat_protection: if the pgtable is marked as protected by dat
 * @fake: pgt references contiguous guest memory block, not a pgtable
 *
 * Returns 0 if the shadow page table was found and -EAGAIN if the page
 * table was not found.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_pgt_lookup(struct gmap *sg, unsigned long saddr,
			   unsigned long *pgt, int *dat_protection,
			   int *fake)
{
	unsigned long *table;
	struct page *page;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	spin_lock(&sg->guest_table_lock);
	table = gmap_table_walk(sg, saddr, 1); /* get segment pointer */
	if (table && !(*table & _SEGMENT_ENTRY_INVALID)) {
		/* Shadow page tables are full pages (pte+pgste) */
		page = pfn_to_page(*table >> PAGE_SHIFT);
		*pgt = page->index & ~GMAP_SHADOW_FAKE_TABLE;
		*dat_protection = !!(*table & _SEGMENT_ENTRY_PROTECT);
		*fake = !!(page->index & GMAP_SHADOW_FAKE_TABLE);
		rc = 0;
	} else {
		rc = -EAGAIN;
	}
	spin_unlock(&sg->guest_table_lock);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_pgt_lookup);
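/*
 * Illustrative caller sketch (assumption: modeled on the vsie shadow
 * fault path in arch/s390/kvm/gaccess.c): look up an existing shadow
 * page table first and fall back to building a new one only on -EAGAIN.
 *
 *	int dat_protection, fake;
 *	unsigned long pgt;
 *	int rc;
 *
 *	down_read(&sg->mm->mmap_sem);
 *	rc = gmap_shadow_pgt_lookup(sg, saddr, &pgt, &dat_protection, &fake);
 *	if (rc == -EAGAIN)
 *		rc = shadow_tables(sg, saddr);	(placeholder for the
 *		caller's own guest table walk ending in gmap_shadow_pgt())
 *	up_read(&sg->mm->mmap_sem);
 */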
/**
 * gmap_shadow_pgt - instantiate a shadow page table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @pgt: parent gmap address of the page table to get shadowed
 * @fake: pgt references contiguous guest memory block, not a pgtable
 *
 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt,
		    int fake)
{
	unsigned long raddr, origin;
	unsigned long *s_pgt, *table;
	struct page *page;
	int rc;

	BUG_ON(!gmap_is_shadow(sg) || (pgt & _SEGMENT_ENTRY_LARGE));
	/* Allocate a shadow page table */
	page = page_table_alloc_pgste(sg->mm);
	if (!page)
		return -ENOMEM;
	page->index = pgt & _SEGMENT_ENTRY_ORIGIN;
	if (fake)
		page->index |= GMAP_SHADOW_FAKE_TABLE;
	s_pgt = (unsigned long *) page_to_phys(page);
	/* Install shadow page table */
	spin_lock(&sg->guest_table_lock);
	table = gmap_table_walk(sg, saddr, 1); /* get segment pointer */
	if (!table) {
		rc = -EAGAIN;		/* Race with unshadow */
		goto out_free;
	}
	if (!(*table & _SEGMENT_ENTRY_INVALID)) {
		rc = 0;			/* Already established */
		goto out_free;
	} else if (*table & _SEGMENT_ENTRY_ORIGIN) {
		rc = -EAGAIN;		/* Race with shadow */
		goto out_free;
	}
	/* mark as invalid as long as the parent table is not protected */
	*table = (unsigned long) s_pgt | _SEGMENT_ENTRY |
		 (pgt & _SEGMENT_ENTRY_PROTECT) | _SEGMENT_ENTRY_INVALID;
	list_add(&page->lru, &sg->pt_list);
	if (fake) {
		/* nothing to protect for fake tables */
		*table &= ~_SEGMENT_ENTRY_INVALID;
		spin_unlock(&sg->guest_table_lock);
		return 0;
	}
	spin_unlock(&sg->guest_table_lock);
	/* Make pgt read-only in parent gmap page table (not the pgste) */
	raddr = (saddr & _SEGMENT_MASK) | _SHADOW_RMAP_SEGMENT;
	origin = pgt & _SEGMENT_ENTRY_ORIGIN & PAGE_MASK;
	rc = gmap_protect_rmap(sg, raddr, origin, PAGE_SIZE);
	spin_lock(&sg->guest_table_lock);
	if (!rc) {
		table = gmap_table_walk(sg, saddr, 1);
		if (!table || (*table & _SEGMENT_ENTRY_ORIGIN) !=
			      (unsigned long) s_pgt)
			rc = -EAGAIN;	/* Race with unshadow */
		else
			*table &= ~_SEGMENT_ENTRY_INVALID;
	} else {
		gmap_unshadow_pgt(sg, raddr);
	}
	spin_unlock(&sg->guest_table_lock);
	return rc;
out_free:
	spin_unlock(&sg->guest_table_lock);
	page_table_free_pgste(page);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_pgt);
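/*
 * Shadow page tables differ from the crst shadows above in two ways: they
 * are allocated as full 4K pages (2K of ptes plus 2K of pgstes) via
 * page_table_alloc_pgste(), and the parent table is protected at page
 * granularity, which is why origin is rounded down with PAGE_MASK and
 * exactly PAGE_SIZE bytes are passed to gmap_protect_rmap().
 */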
/**
 * gmap_shadow_page - create a shadow page mapping
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @pte: pte in parent gmap address space to get shadowed
 *
 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_page(struct gmap *sg, unsigned long saddr, pte_t pte)
{
	struct gmap *parent;
	struct gmap_rmap *rmap;
	unsigned long vmaddr, paddr;
	spinlock_t *ptl;
	pte_t *sptep, *tptep;
	int prot;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	parent = sg->parent;
	prot = (pte_val(pte) & _PAGE_PROTECT) ? PROT_READ : PROT_WRITE;

	rmap = kzalloc(sizeof(*rmap), GFP_KERNEL);
	if (!rmap)
		return -ENOMEM;
	rmap->raddr = (saddr & PAGE_MASK) | _SHADOW_RMAP_PGTABLE;

	while (1) {
		paddr = pte_val(pte) & PAGE_MASK;
		vmaddr = __gmap_translate(parent, paddr);
		if (IS_ERR_VALUE(vmaddr)) {
			rc = vmaddr;
			break;
		}
		rc = radix_tree_preload(GFP_KERNEL);
		if (rc)
			break;
		rc = -EAGAIN;
		sptep = gmap_pte_op_walk(parent, paddr, &ptl);
		if (sptep) {
			spin_lock(&sg->guest_table_lock);
			/* Get page table pointer */
			tptep = (pte_t *) gmap_table_walk(sg, saddr, 0);
			if (!tptep) {
				spin_unlock(&sg->guest_table_lock);
				gmap_pte_op_end(ptl);
				radix_tree_preload_end();
				break;
			}
			rc = ptep_shadow_pte(sg->mm, saddr, sptep, tptep, pte);
			if (rc > 0) {
				/* Success and a new mapping */
				gmap_insert_rmap(sg, vmaddr, rmap);
				rmap = NULL;
				rc = 0;
			}
			gmap_pte_op_end(ptl);
			spin_unlock(&sg->guest_table_lock);
		}
		radix_tree_preload_end();
		if (!rc)
			break;
		rc = gmap_pte_op_fixup(parent, paddr, vmaddr, prot);
		if (rc)
			break;
	}
	kfree(rmap);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_page);
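/*
 * A minimal sketch of the fixup cycle used above (illustrative only):
 * gmap operations on parent ptes run the fast path under the pte lock,
 * and when the walk fails they drop every lock, resolve the parent fault
 * and retry.
 *
 *	while (1) {
 *		sptep = gmap_pte_op_walk(parent, paddr, &ptl);
 *		if (sptep) {
 *			... shadow the pte, set rc ...
 *			gmap_pte_op_end(ptl);
 *		}
 *		if (!rc)
 *			break;
 *		rc = gmap_pte_op_fixup(parent, paddr, vmaddr, prot);
 *		if (rc)
 *			break;
 *	}
 */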
/**
 * gmap_shadow_notify - handle notifications for shadow gmap
 * @sg: pointer to the shadow guest address space structure
 * @vmaddr: affected host virtual address
 * @gaddr: affected guest address in the parent gmap
 *
 * Called with sg->parent->shadow_lock.
 */
static void gmap_shadow_notify(struct gmap *sg, unsigned long vmaddr,
			       unsigned long gaddr)
{
	struct gmap_rmap *rmap, *rnext, *head;
	unsigned long start, end, bits, raddr;

	BUG_ON(!gmap_is_shadow(sg));

	spin_lock(&sg->guest_table_lock);
	if (sg->removed) {
		spin_unlock(&sg->guest_table_lock);
		return;
	}
	/* Check for top level table */
	start = sg->orig_asce & _ASCE_ORIGIN;
	end = start + ((sg->orig_asce & _ASCE_TABLE_LENGTH) + 1) * PAGE_SIZE;
	if (!(sg->orig_asce & _ASCE_REAL_SPACE) && gaddr >= start &&
	    gaddr < end) {
		/* The complete shadow table has to go */
		gmap_unshadow(sg);
		spin_unlock(&sg->guest_table_lock);
		list_del(&sg->list);
		gmap_put(sg);
		return;
	}
	/* Remove the page table tree for one specific entry */
	head = radix_tree_delete(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT);
	gmap_for_each_rmap_safe(rmap, rnext, head) {
		bits = rmap->raddr & _SHADOW_RMAP_MASK;
		raddr = rmap->raddr ^ bits;
		switch (bits) {
		case _SHADOW_RMAP_REGION1:
			gmap_unshadow_r2t(sg, raddr);
			break;
		case _SHADOW_RMAP_REGION2:
			gmap_unshadow_r3t(sg, raddr);
			break;
		case _SHADOW_RMAP_REGION3:
			gmap_unshadow_sgt(sg, raddr);
			break;
		case _SHADOW_RMAP_SEGMENT:
			gmap_unshadow_pgt(sg, raddr);
			break;
		case _SHADOW_RMAP_PGTABLE:
			gmap_unshadow_page(sg, raddr);
			break;
		}
		kfree(rmap);
	}
	spin_unlock(&sg->guest_table_lock);
}
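/*
 * The rmap entries freed above pack the shadow address and the table
 * level into a single word, e.g. gmap_shadow_pgt() stores
 * (saddr & _SEGMENT_MASK) | _SHADOW_RMAP_SEGMENT.  Extracting
 * bits = rmap->raddr & _SHADOW_RMAP_MASK therefore selects the matching
 * unshadow function, and raddr = rmap->raddr ^ bits recovers the aligned
 * shadow address.
 */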
/**
 * ptep_notify - call all invalidation callbacks for a specific pte.
 * @mm: pointer to the process mm_struct
 * @vmaddr: virtual address in the process address space
 * @pte: pointer to the page table entry
 * @bits: bits from the pgste that caused the notify call
 *
 * This function is assumed to be called with the page table lock held
 * for the pte to notify.
 */
void ptep_notify(struct mm_struct *mm, unsigned long vmaddr,
		 pte_t *pte, unsigned long bits)
{
	unsigned long offset, gaddr = 0;
	unsigned long *table;
	struct gmap *gmap, *sg, *next;

	offset = ((unsigned long) pte) & (255 * sizeof(pte_t));
	offset = offset * (PAGE_SIZE / sizeof(pte_t));
	rcu_read_lock();
	list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
		spin_lock(&gmap->guest_table_lock);
		table = radix_tree_lookup(&gmap->host_to_guest,
					  vmaddr >> PMD_SHIFT);
		if (table)
			gaddr = __gmap_segment_gaddr(table) + offset;
		spin_unlock(&gmap->guest_table_lock);
		if (!table)
			continue;

		if (!list_empty(&gmap->children) && (bits & PGSTE_VSIE_BIT)) {
			spin_lock(&gmap->shadow_lock);
			list_for_each_entry_safe(sg, next,
						 &gmap->children, list)
				gmap_shadow_notify(sg, vmaddr, gaddr);
			spin_unlock(&gmap->shadow_lock);
		}
		if (bits & PGSTE_IN_BIT)
			gmap_call_notifier(gmap, gaddr, gaddr + PAGE_SIZE - 1);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(ptep_notify);
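/*
 * Worked example for the offset calculation in ptep_notify() (values
 * illustrative): a 2K s390 page table holds 256 ptes, so the low bits of
 * the pte pointer encode the index within the table.  For the pte at
 * index 5, "(unsigned long) pte & (255 * sizeof(pte_t))" yields
 * 5 * 8 = 40, and multiplying by PAGE_SIZE / sizeof(pte_t) = 512 gives
 * 20480 = 5 * PAGE_SIZE, i.e. the byte offset of that page within its
 * 1 MB segment.
 */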
static inline void thp_split_mm(struct mm_struct *mm)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	struct vm_area_struct *vma;
	unsigned long addr;

	for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) {
		for (addr = vma->vm_start;
		     addr < vma->vm_end;
		     addr += PAGE_SIZE)
			follow_page(vma, addr, FOLL_SPLIT);
		vma->vm_flags &= ~VM_HUGEPAGE;
		vma->vm_flags |= VM_NOHUGEPAGE;
	}
	mm->def_flags |= VM_NOHUGEPAGE;
#endif
}
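/*
 * thp_split_mm() relies on follow_page() with FOLL_SPLIT splitting any
 * transparent huge page it touches; walking every PAGE_SIZE step of
 * every vma therefore leaves the mm with 4K mappings only, and
 * VM_NOHUGEPAGE (also set in mm->def_flags for future vmas) keeps it
 * that way.
 */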
/*
 * Remove all empty zero pages from the mapping for lazy refaulting
 * - This must be called after mm->context.has_pgste is set, to avoid
 *   future creation of zero pages
 * - This must be called after THP was enabled
 */
static int __zap_zero_pages(pmd_t *pmd, unsigned long start,
			    unsigned long end, struct mm_walk *walk)
{
	unsigned long addr;

	for (addr = start; addr != end; addr += PAGE_SIZE) {
		pte_t *ptep;
		spinlock_t *ptl;

		ptep = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
		if (is_zero_pfn(pte_pfn(*ptep)))
			ptep_xchg_direct(walk->mm, addr, ptep, __pte(_PAGE_INVALID));
		pte_unmap_unlock(ptep, ptl);
	}
	return 0;
}

static inline void zap_zero_pages(struct mm_struct *mm)
{
	struct mm_walk walk = { .pmd_entry = __zap_zero_pages };

	walk.mm = mm;
	walk_page_range(0, TASK_SIZE, &walk);
}
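/*
 * zap_zero_pages() is built on the generic page table walker:
 * __zap_zero_pages() is invoked per pmd range and invalidates every pte
 * that references the shared empty zero page, so the next access
 * refaults into a normal page.  The same mm_walk pattern is reused
 * below for storage keys and CMMA.
 */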
/*
 * switch on pgstes for the current userspace process (for kvm)
 */
int s390_enable_sie(void)
{
	struct mm_struct *mm = current->mm;

	/* Do we have pgstes? If yes, we are done */
	if (mm_has_pgste(mm))
		return 0;
	/* Fail if the page tables are 2K */
	if (!mm_alloc_pgste(mm))
		return -EINVAL;
	down_write(&mm->mmap_sem);
	mm->context.has_pgste = 1;
	/* split thp mappings and disable thp for future mappings */
	thp_split_mm(mm);
	zap_zero_pages(mm);
	up_write(&mm->mmap_sem);
	return 0;
}
EXPORT_SYMBOL_GPL(s390_enable_sie);
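/*
 * Illustrative usage (assumption: mirrors how KVM sets up a virtual
 * machine): the VM creation path calls s390_enable_sie() once, before
 * any guest memory is mapped, and aborts VM creation if the mm cannot
 * provide pgstes; the exact error handling is the caller's choice.
 *
 *	if (s390_enable_sie())
 *		return -EINVAL;
 */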
/*
 * Enable storage key handling from now on and initialize the storage
 * keys with the default key.
 */
static int __s390_enable_skey(pte_t *pte, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	/* Clear storage key */
	ptep_zap_key(walk->mm, addr, pte);
	return 0;
}

int s390_enable_skey(void)
{
	struct mm_walk walk = { .pte_entry = __s390_enable_skey };
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int rc = 0;

	down_write(&mm->mmap_sem);
	if (mm_use_skey(mm))
		goto out_up;

	mm->context.use_skey = 1;
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (ksm_madvise(vma, vma->vm_start, vma->vm_end,
				MADV_UNMERGEABLE, &vma->vm_flags)) {
			mm->context.use_skey = 0;
			rc = -ENOMEM;
			goto out_up;
		}
	}
	mm->def_flags &= ~VM_MERGEABLE;

	walk.mm = mm;
	walk_page_range(0, TASK_SIZE, &walk);

out_up:
	up_write(&mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(s390_enable_skey);
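/*
 * Design note for the loop above: storage keys live per physical page,
 * so a KSM-merged page shared between several mappings could not hold
 * independent keys.  Every vma is therefore unmerged via ksm_madvise()
 * with MADV_UNMERGEABLE, and VM_MERGEABLE is removed from mm->def_flags,
 * before the page table walk resets the keys.
 */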
/*
 * Reset CMMA state, make all pages stable again.
 */
static int __s390_reset_cmma(pte_t *pte, unsigned long addr,
			     unsigned long next, struct mm_walk *walk)
{
	ptep_zap_unused(walk->mm, addr, pte, 1);
	return 0;
}

void s390_reset_cmma(struct mm_struct *mm)
{
	struct mm_walk walk = { .pte_entry = __s390_reset_cmma };

	down_write(&mm->mmap_sem);
	walk.mm = mm;
	walk_page_range(0, TASK_SIZE, &walk);
	up_write(&mm->mmap_sem);
}
EXPORT_SYMBOL_GPL(s390_reset_cmma);
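/*
 * Illustrative usage (assumption: modeled on KVM's memory attribute
 * handling): when userspace requests a CMMA reset for a whole guest,
 * KVM forwards it to the parent mm of the guest's gmap:
 *
 *	s390_reset_cmma(kvm->arch.gmap->mm);
 */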