/*
 * KVM guest address space mapping code
 *
 * Copyright IBM Corp. 2007, 2016
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/swapops.h>
#include <linux/ksm.h>
#include <linux/mman.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/tlb.h>
#define GMAP_SHADOW_FAKE_TABLE 1ULL
/**
 * gmap_alloc - allocate and initialize a guest address space
 * @limit: maximum address of the gmap address space
 *
 * Returns a guest address space structure.
 */
static struct gmap *gmap_alloc(unsigned long limit)
{
	struct gmap *gmap;
	struct page *page;
	unsigned long *table;
	unsigned long etype, atype;

	if (limit < (1UL << 31)) {
		limit = (1UL << 31) - 1;
		atype = _ASCE_TYPE_SEGMENT;
		etype = _SEGMENT_ENTRY_EMPTY;
	} else if (limit < (1UL << 42)) {
		limit = (1UL << 42) - 1;
		atype = _ASCE_TYPE_REGION3;
		etype = _REGION3_ENTRY_EMPTY;
	} else if (limit < (1UL << 53)) {
		limit = (1UL << 53) - 1;
		atype = _ASCE_TYPE_REGION2;
		etype = _REGION2_ENTRY_EMPTY;
	} else {
		limit = -1UL;
		atype = _ASCE_TYPE_REGION1;
		etype = _REGION1_ENTRY_EMPTY;
	}
	gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL);
	if (!gmap)
		goto out;
	INIT_LIST_HEAD(&gmap->crst_list);
	INIT_LIST_HEAD(&gmap->children);
	INIT_LIST_HEAD(&gmap->pt_list);
	INIT_RADIX_TREE(&gmap->guest_to_host, GFP_KERNEL);
	INIT_RADIX_TREE(&gmap->host_to_guest, GFP_ATOMIC);
	INIT_RADIX_TREE(&gmap->host_to_rmap, GFP_ATOMIC);
	spin_lock_init(&gmap->guest_table_lock);
	spin_lock_init(&gmap->shadow_lock);
	atomic_set(&gmap->ref_count, 1);
	page = alloc_pages(GFP_KERNEL, 2);
	if (!page)
		goto out_free;
	page->index = 0;
	list_add(&page->lru, &gmap->crst_list);
	table = (unsigned long *) page_to_phys(page);
	crst_table_init(table, etype);
	gmap->table = table;
	gmap->asce = atype | _ASCE_TABLE_LENGTH |
		_ASCE_USER_BITS | __pa(table);
	gmap->asce_end = limit;
	return gmap;

out_free:
	kfree(gmap);
out:
	return NULL;
}
/**
 * gmap_create - create a guest address space
 * @mm: pointer to the parent mm_struct
 * @limit: maximum size of the gmap address space
 *
 * Returns a guest address space structure.
 */
struct gmap *gmap_create(struct mm_struct *mm, unsigned long limit)
{
	struct gmap *gmap;

	gmap = gmap_alloc(limit);
	if (!gmap)
		return NULL;
	gmap->mm = mm;
	spin_lock(&mm->context.gmap_lock);
	list_add_rcu(&gmap->list, &mm->context.gmap_list);
	spin_unlock(&mm->context.gmap_lock);
	return gmap;
}
EXPORT_SYMBOL_GPL(gmap_create);
static void gmap_flush_tlb(struct gmap *gmap)
{
	if (MACHINE_HAS_IDTE)
		__tlb_flush_idte(gmap->asce);
	else
		__tlb_flush_global();
}
static void gmap_radix_tree_free(struct radix_tree_root *root)
{
	struct radix_tree_iter iter;
	unsigned long indices[16];
	unsigned long index;
	void **slot;
	int i, nr;

	/* A radix tree is freed by deleting all of its entries */
	index = 0;
	do {
		nr = 0;
		radix_tree_for_each_slot(slot, root, &iter, index) {
			indices[nr] = iter.index;
			if (++nr == 16)
				break;
		}
		for (i = 0; i < nr; i++) {
			index = indices[i];
			radix_tree_delete(root, index);
		}
	} while (nr > 0);
}
static void gmap_rmap_radix_tree_free(struct radix_tree_root *root)
{
	struct gmap_rmap *rmap, *rnext, *head;
	struct radix_tree_iter iter;
	unsigned long indices[16];
	unsigned long index;
	void **slot;
	int i, nr;

	/* A radix tree is freed by deleting all of its entries */
	index = 0;
	do {
		nr = 0;
		radix_tree_for_each_slot(slot, root, &iter, index) {
			indices[nr] = iter.index;
			if (++nr == 16)
				break;
		}
		for (i = 0; i < nr; i++) {
			index = indices[i];
			head = radix_tree_delete(root, index);
			gmap_for_each_rmap_safe(rmap, rnext, head)
				kfree(rmap);
		}
	} while (nr > 0);
}
/**
 * gmap_free - free a guest address space
 * @gmap: pointer to the guest address space structure
 *
 * No locks required. There are no references to this gmap anymore.
 */
static void gmap_free(struct gmap *gmap)
{
	struct page *page, *next;

	/* Flush tlb of all gmaps (if not already done for shadows) */
	if (!(gmap_is_shadow(gmap) && gmap->removed))
		gmap_flush_tlb(gmap);
	/* Free all segment & region tables. */
	list_for_each_entry_safe(page, next, &gmap->crst_list, lru)
		__free_pages(page, 2);
	gmap_radix_tree_free(&gmap->guest_to_host);
	gmap_radix_tree_free(&gmap->host_to_guest);

	/* Free additional data for a shadow gmap */
	if (gmap_is_shadow(gmap)) {
		/* Free all page tables. */
		list_for_each_entry_safe(page, next, &gmap->pt_list, lru)
			page_table_free_pgste(page);
		gmap_rmap_radix_tree_free(&gmap->host_to_rmap);
		/* Release reference to the parent */
		gmap_put(gmap->parent);
	}

	kfree(gmap);
}
/**
 * gmap_get - increase reference counter for guest address space
 * @gmap: pointer to the guest address space structure
 *
 * Returns the gmap pointer
 */
struct gmap *gmap_get(struct gmap *gmap)
{
	atomic_inc(&gmap->ref_count);
	return gmap;
}
EXPORT_SYMBOL_GPL(gmap_get);
/**
 * gmap_put - decrease reference counter for guest address space
 * @gmap: pointer to the guest address space structure
 *
 * If the reference counter reaches zero the guest address space is freed.
 */
void gmap_put(struct gmap *gmap)
{
	if (atomic_dec_return(&gmap->ref_count) == 0)
		gmap_free(gmap);
}
EXPORT_SYMBOL_GPL(gmap_put);
/**
 * gmap_remove - remove a guest address space but do not free it yet
 * @gmap: pointer to the guest address space structure
 */
void gmap_remove(struct gmap *gmap)
{
	struct gmap *sg, *next;

	/* Remove all shadow gmaps linked to this gmap */
	if (!list_empty(&gmap->children)) {
		spin_lock(&gmap->shadow_lock);
		list_for_each_entry_safe(sg, next, &gmap->children, list) {
			list_del(&sg->list);
			gmap_put(sg);
		}
		spin_unlock(&gmap->shadow_lock);
	}
	/* Remove gmap from the pre-mm list */
	spin_lock(&gmap->mm->context.gmap_lock);
	list_del_rcu(&gmap->list);
	spin_unlock(&gmap->mm->context.gmap_lock);
	synchronize_rcu();
	/* Put reference */
	gmap_put(gmap);
}
EXPORT_SYMBOL_GPL(gmap_remove);
/**
 * gmap_enable - switch primary space to the guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_enable(struct gmap *gmap)
{
	S390_lowcore.gmap = (unsigned long) gmap;
}
EXPORT_SYMBOL_GPL(gmap_enable);
/**
 * gmap_disable - switch back to the standard primary address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_disable(struct gmap *gmap)
{
	S390_lowcore.gmap = 0UL;
}
EXPORT_SYMBOL_GPL(gmap_disable);
/**
 * gmap_get_enabled - get a pointer to the currently enabled gmap
 *
 * Returns a pointer to the currently enabled gmap. 0 if none is enabled.
 */
struct gmap *gmap_get_enabled(void)
{
	return (struct gmap *) S390_lowcore.gmap;
}
EXPORT_SYMBOL_GPL(gmap_get_enabled);
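
/*
 * Illustrative sketch (not part of the original file): how a KVM-style
 * caller might drive the gmap life cycle with the functions above. The
 * limit value is just an example; error handling is reduced to a bail-out.
 */
#if 0
static void gmap_lifecycle_sketch(struct mm_struct *mm)
{
	struct gmap *gmap;

	gmap = gmap_create(mm, (1UL << 44) - 1);	/* region-2 sized space */
	if (!gmap)
		return;
	gmap_enable(gmap);	/* S390_lowcore.gmap now points here */
	/* ... enter SIE, resolve faults with gmap_fault() ... */
	gmap_disable(gmap);
	gmap_remove(gmap);	/* unlink and drop the last reference */
}
#endif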
/*
 * gmap_alloc_table is assumed to be called with mmap_sem held
 */
static int gmap_alloc_table(struct gmap *gmap, unsigned long *table,
			    unsigned long init, unsigned long gaddr)
{
	struct page *page;
	unsigned long *new;

	/* since we dont free the gmap table until gmap_free we can unlock */
	page = alloc_pages(GFP_KERNEL, 2);
	if (!page)
		return -ENOMEM;
	new = (unsigned long *) page_to_phys(page);
	crst_table_init(new, init);
	spin_lock(&gmap->guest_table_lock);
	if (*table & _REGION_ENTRY_INVALID) {
		list_add(&page->lru, &gmap->crst_list);
		*table = (unsigned long) new | _REGION_ENTRY_LENGTH |
			(*table & _REGION_ENTRY_TYPE_MASK);
		page->index = gaddr;
		page = NULL;
	}
	spin_unlock(&gmap->guest_table_lock);
	if (page)
		__free_pages(page, 2);
	return 0;
}
/**
 * __gmap_segment_gaddr - find virtual address from segment pointer
 * @entry: pointer to a segment table entry in the guest address space
 *
 * Returns the virtual address in the guest address space for the segment
 */
static unsigned long __gmap_segment_gaddr(unsigned long *entry)
{
	struct page *page;
	unsigned long offset, mask;

	offset = (unsigned long) entry / sizeof(unsigned long);
	offset = (offset & (PTRS_PER_PMD - 1)) * PMD_SIZE;
	mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
	page = virt_to_page((void *)((unsigned long) entry & mask));
	return page->index + offset;
}
/**
 * __gmap_unlink_by_vmaddr - unlink a single segment via a host address
 * @gmap: pointer to the guest address space structure
 * @vmaddr: address in the host process address space
 *
 * Returns 1 if a TLB flush is required
 */
static int __gmap_unlink_by_vmaddr(struct gmap *gmap, unsigned long vmaddr)
{
	unsigned long *entry;
	int flush = 0;

	BUG_ON(gmap_is_shadow(gmap));
	spin_lock(&gmap->guest_table_lock);
	entry = radix_tree_delete(&gmap->host_to_guest, vmaddr >> PMD_SHIFT);
	if (entry) {
		flush = (*entry != _SEGMENT_ENTRY_INVALID);
		*entry = _SEGMENT_ENTRY_INVALID;
	}
	spin_unlock(&gmap->guest_table_lock);
	return flush;
}
/**
 * __gmap_unmap_by_gaddr - unmap a single segment via a guest address
 * @gmap: pointer to the guest address space structure
 * @gaddr: address in the guest address space
 *
 * Returns 1 if a TLB flush is required
 */
static int __gmap_unmap_by_gaddr(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long vmaddr;

	vmaddr = (unsigned long) radix_tree_delete(&gmap->guest_to_host,
						   gaddr >> PMD_SHIFT);
	return vmaddr ? __gmap_unlink_by_vmaddr(gmap, vmaddr) : 0;
}
/**
 * gmap_unmap_segment - unmap segment from the guest address space
 * @gmap: pointer to the guest address space structure
 * @to: address in the guest address space
 * @len: length of the memory area to unmap
 *
 * Returns 0 if the unmap succeeded, -EINVAL if not.
 */
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
{
	unsigned long off;
	int flush;

	BUG_ON(gmap_is_shadow(gmap));
	if ((to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || to + len < to)
		return -EINVAL;

	flush = 0;
	down_write(&gmap->mm->mmap_sem);
	for (off = 0; off < len; off += PMD_SIZE)
		flush |= __gmap_unmap_by_gaddr(gmap, to + off);
	up_write(&gmap->mm->mmap_sem);
	if (flush)
		gmap_flush_tlb(gmap);
	return 0;
}
EXPORT_SYMBOL_GPL(gmap_unmap_segment);
/**
 * gmap_map_segment - map a segment to the guest address space
 * @gmap: pointer to the guest address space structure
 * @from: source address in the parent address space
 * @to: target address in the guest address space
 * @len: length of the memory area to map
 *
 * Returns 0 if the mmap succeeded, -EINVAL or -ENOMEM if not.
 */
int gmap_map_segment(struct gmap *gmap, unsigned long from,
		     unsigned long to, unsigned long len)
{
	unsigned long off;
	int flush;

	BUG_ON(gmap_is_shadow(gmap));
	if ((from | to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || from + len < from || to + len < to ||
	    from + len - 1 > TASK_MAX_SIZE || to + len - 1 > gmap->asce_end)
		return -EINVAL;

	flush = 0;
	down_write(&gmap->mm->mmap_sem);
	for (off = 0; off < len; off += PMD_SIZE) {
		/* Remove old translation */
		flush |= __gmap_unmap_by_gaddr(gmap, to + off);
		/* Store new translation */
		if (radix_tree_insert(&gmap->guest_to_host,
				      (to + off) >> PMD_SHIFT,
				      (void *) from + off))
			break;
	}
	up_write(&gmap->mm->mmap_sem);
	if (flush)
		gmap_flush_tlb(gmap);
	if (off >= len)
		return 0;
	gmap_unmap_segment(gmap, to, len);
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(gmap_map_segment);
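
/*
 * Illustrative sketch (not part of the original file): mapping one
 * segment of the host address space into the guest. Both addresses and
 * the length must be PMD aligned or gmap_map_segment() returns -EINVAL.
 */
#if 0
static int gmap_map_sketch(struct gmap *gmap, unsigned long host_base)
{
	/* guest address 0 -> host_base, one segment of PMD_SIZE bytes */
	return gmap_map_segment(gmap, host_base, 0, PMD_SIZE);
}
#endif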
/**
 * __gmap_translate - translate a guest address to a user space address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 *
 * Returns user space address which corresponds to the guest address or
 * -EFAULT if no such mapping exists.
 * This function does not establish potentially missing page table entries.
 * The mmap_sem of the mm that belongs to the address space must be held
 * when this function gets called.
 *
 * Note: Can also be called for shadow gmaps.
 */
unsigned long __gmap_translate(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long vmaddr;

	vmaddr = (unsigned long)
		radix_tree_lookup(&gmap->guest_to_host, gaddr >> PMD_SHIFT);
	/* Note: guest_to_host is empty for a shadow gmap */
	return vmaddr ? (vmaddr | (gaddr & ~PMD_MASK)) : -EFAULT;
}
EXPORT_SYMBOL_GPL(__gmap_translate);
/**
 * gmap_translate - translate a guest address to a user space address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 *
 * Returns user space address which corresponds to the guest address or
 * -EFAULT if no such mapping exists.
 * This function does not establish potentially missing page table entries.
 */
unsigned long gmap_translate(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long rc;

	down_read(&gmap->mm->mmap_sem);
	rc = __gmap_translate(gmap, gaddr);
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_translate);
/**
 * gmap_unlink - disconnect a page table from the gmap shadow tables
 * @mm: pointer to the parent mm_struct
 * @table: pointer to the host page table
 * @vmaddr: vm address associated with the host page table
 */
void gmap_unlink(struct mm_struct *mm, unsigned long *table,
		 unsigned long vmaddr)
{
	struct gmap *gmap;
	int flush;

	rcu_read_lock();
	list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
		flush = __gmap_unlink_by_vmaddr(gmap, vmaddr);
		if (flush)
			gmap_flush_tlb(gmap);
	}
	rcu_read_unlock();
}
/**
 * __gmap_link - set up shadow page tables to connect a host to a guest address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 * @vmaddr: vm address
 *
 * Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
 * if the vm address is already mapped to a different guest segment.
 * The mmap_sem of the mm that belongs to the address space must be held
 * when this function gets called.
 */
int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
{
	struct mm_struct *mm;
	unsigned long *table;
	spinlock_t *ptl;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	int rc;

	BUG_ON(gmap_is_shadow(gmap));
	/* Create higher level tables in the gmap page table */
	table = gmap->table;
	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION1) {
		table += (gaddr >> 53) & 0x7ff;
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY,
				     gaddr & 0xffe0000000000000UL))
			return -ENOMEM;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	}
	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION2) {
		table += (gaddr >> 42) & 0x7ff;
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY,
				     gaddr & 0xfffffc0000000000UL))
			return -ENOMEM;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	}
	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION3) {
		table += (gaddr >> 31) & 0x7ff;
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY,
				     gaddr & 0xffffffff80000000UL))
			return -ENOMEM;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	}
	table += (gaddr >> 20) & 0x7ff;
	/* Walk the parent mm page table */
	mm = gmap->mm;
	pgd = pgd_offset(mm, vmaddr);
	VM_BUG_ON(pgd_none(*pgd));
	pud = pud_offset(pgd, vmaddr);
	VM_BUG_ON(pud_none(*pud));
	/* large puds cannot yet be handled */
	if (pud_large(*pud))
		return -EFAULT;
	pmd = pmd_offset(pud, vmaddr);
	VM_BUG_ON(pmd_none(*pmd));
	/* large pmds cannot yet be handled */
	if (pmd_large(*pmd))
		return -EFAULT;
	/* Link gmap segment table entry location to page table. */
	rc = radix_tree_preload(GFP_KERNEL);
	if (rc)
		return rc;
	ptl = pmd_lock(mm, pmd);
	spin_lock(&gmap->guest_table_lock);
	if (*table == _SEGMENT_ENTRY_INVALID) {
		rc = radix_tree_insert(&gmap->host_to_guest,
				       vmaddr >> PMD_SHIFT, table);
		if (!rc)
			*table = pmd_val(*pmd);
	} else
		rc = 0;
	spin_unlock(&gmap->guest_table_lock);
	spin_unlock(ptl);
	radix_tree_preload_end();
	return rc;
}
/**
 * gmap_fault - resolve a fault on a guest address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 * @fault_flags: flags to pass down to handle_mm_fault()
 *
 * Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
 * if the vm address is already mapped to a different guest segment.
 */
int gmap_fault(struct gmap *gmap, unsigned long gaddr,
	       unsigned int fault_flags)
{
	unsigned long vmaddr;
	int rc;
	bool unlocked;

	down_read(&gmap->mm->mmap_sem);

retry:
	unlocked = false;
	vmaddr = __gmap_translate(gmap, gaddr);
	if (IS_ERR_VALUE(vmaddr)) {
		rc = vmaddr;
		goto out_up;
	}
	if (fixup_user_fault(current, gmap->mm, vmaddr, fault_flags,
			     &unlocked)) {
		rc = -EFAULT;
		goto out_up;
	}
	/*
	 * In case fixup_user_fault unlocked the mmap_sem during the
	 * fault-in, redo __gmap_translate to avoid racing with a
	 * map/unmap_segment.
	 */
	if (unlocked)
		goto retry;

	rc = __gmap_link(gmap, gaddr, vmaddr);
out_up:
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_fault);
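
/*
 * Illustrative sketch (not part of the original file): resolving a guest
 * write fault. FAULT_FLAG_WRITE is the flag a SIE exit handler would
 * typically pass down for a store access.
 */
#if 0
static int sketch_resolve_write_fault(struct gmap *gmap, unsigned long gaddr)
{
	/* -EFAULT here means no gmap segment is mapped at gaddr */
	return gmap_fault(gmap, gaddr, FAULT_FLAG_WRITE);
}
#endif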
/*
 * this function is assumed to be called with mmap_sem held
 */
void __gmap_zap(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long vmaddr;
	spinlock_t *ptl;
	pte_t *ptep;

	/* Find the vm address for the guest address */
	vmaddr = (unsigned long) radix_tree_lookup(&gmap->guest_to_host,
						   gaddr >> PMD_SHIFT);
	if (vmaddr) {
		vmaddr |= gaddr & ~PMD_MASK;
		/* Get pointer to the page table entry */
		ptep = get_locked_pte(gmap->mm, vmaddr, &ptl);
		if (likely(ptep))
			ptep_zap_unused(gmap->mm, vmaddr, ptep, 0);
		pte_unmap_unlock(ptep, ptl);
	}
}
EXPORT_SYMBOL_GPL(__gmap_zap);
void gmap_discard(struct gmap *gmap, unsigned long from, unsigned long to)
{
	unsigned long gaddr, vmaddr, size;
	struct vm_area_struct *vma;

	down_read(&gmap->mm->mmap_sem);
	for (gaddr = from; gaddr < to;
	     gaddr = (gaddr + PMD_SIZE) & PMD_MASK) {
		/* Find the vm address for the guest address */
		vmaddr = (unsigned long)
			radix_tree_lookup(&gmap->guest_to_host,
					  gaddr >> PMD_SHIFT);
		if (!vmaddr)
			continue;
		vmaddr |= gaddr & ~PMD_MASK;
		/* Find vma in the parent mm */
		vma = find_vma(gmap->mm, vmaddr);
		size = min(to - gaddr, PMD_SIZE - (gaddr & ~PMD_MASK));
		zap_page_range(vma, vmaddr, size, NULL);
	}
	up_read(&gmap->mm->mmap_sem);
}
EXPORT_SYMBOL_GPL(gmap_discard);
static LIST_HEAD(gmap_notifier_list);
static DEFINE_SPINLOCK(gmap_notifier_lock);
/**
 * gmap_register_pte_notifier - register a pte invalidation callback
 * @nb: pointer to the gmap notifier block
 */
void gmap_register_pte_notifier(struct gmap_notifier *nb)
{
	spin_lock(&gmap_notifier_lock);
	list_add_rcu(&nb->list, &gmap_notifier_list);
	spin_unlock(&gmap_notifier_lock);
}
EXPORT_SYMBOL_GPL(gmap_register_pte_notifier);
/**
 * gmap_unregister_pte_notifier - remove a pte invalidation callback
 * @nb: pointer to the gmap notifier block
 */
void gmap_unregister_pte_notifier(struct gmap_notifier *nb)
{
	spin_lock(&gmap_notifier_lock);
	list_del_rcu(&nb->list);
	spin_unlock(&gmap_notifier_lock);
	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(gmap_unregister_pte_notifier);
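
/*
 * Illustrative sketch (not part of the original file): a minimal pte
 * invalidation callback. The callback runs for ranges previously armed
 * with gmap_mprotect_notify() or the VSIE protection below; the names
 * are hypothetical.
 */
#if 0
static void sketch_notifier_call(struct gmap *gmap, unsigned long start,
				 unsigned long end)
{
	/* e.g. kick the vcpu that currently uses this gmap */
}

static struct gmap_notifier sketch_notifier = {
	.notifier_call = sketch_notifier_call,
};

static void sketch_register(void)
{
	gmap_register_pte_notifier(&sketch_notifier);
}
#endif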
/**
 * gmap_call_notifier - call all registered invalidation callbacks
 * @gmap: pointer to guest mapping meta data structure
 * @start: start virtual address in the guest address space
 * @end: end virtual address in the guest address space
 */
static void gmap_call_notifier(struct gmap *gmap, unsigned long start,
			       unsigned long end)
{
	struct gmap_notifier *nb;

	list_for_each_entry(nb, &gmap_notifier_list, list)
		nb->notifier_call(gmap, start, end);
}
/**
 * gmap_table_walk - walk the gmap page tables
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @level: page table level to stop at
 *
 * Returns a table entry pointer for the given guest address and @level
 * @level=0 : returns a pointer to a page table entry (or NULL)
 * @level=1 : returns a pointer to a segment table entry (or NULL)
 * @level=2 : returns a pointer to a region-3 table entry (or NULL)
 * @level=3 : returns a pointer to a region-2 table entry (or NULL)
 * @level=4 : returns a pointer to a region-1 table entry (or NULL)
 *
 * Returns NULL if the gmap page tables could not be walked to the
 * requested level.
 *
 * Note: Can also be called for shadow gmaps.
 */
static inline unsigned long *gmap_table_walk(struct gmap *gmap,
					     unsigned long gaddr, int level)
{
	unsigned long *table;

	if ((gmap->asce & _ASCE_TYPE_MASK) + 4 < (level * 4))
		return NULL;
	if (gmap_is_shadow(gmap) && gmap->removed)
		return NULL;
	if (gaddr & (-1UL << (31 + ((gmap->asce & _ASCE_TYPE_MASK) >> 2)*11)))
		return NULL;
	table = gmap->table;
	switch (gmap->asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_REGION1:
		table += (gaddr >> 53) & 0x7ff;
		if (level == 4)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* fallthrough */
	case _ASCE_TYPE_REGION2:
		table += (gaddr >> 42) & 0x7ff;
		if (level == 3)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* fallthrough */
	case _ASCE_TYPE_REGION3:
		table += (gaddr >> 31) & 0x7ff;
		if (level == 2)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* fallthrough */
	case _ASCE_TYPE_SEGMENT:
		table += (gaddr >> 20) & 0x7ff;
		if (level == 1)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
		table += (gaddr >> 12) & 0xff;
	}
	return table;
}
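
/*
 * Illustrative sketch (not part of the original file): the @level argument
 * selects where the walk stops, e.g. level 1 yields the segment table
 * entry for @gaddr and level 0 the page table entry.
 */
#if 0
static bool sketch_segment_is_valid(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long *ste = gmap_table_walk(gmap, gaddr, 1);

	return ste && !(*ste & _SEGMENT_ENTRY_INVALID);
}
#endif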
/**
 * gmap_pte_op_walk - walk the gmap page table, get the page table lock
 *		      and return the pte pointer
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @ptl: pointer to the spinlock pointer
 *
 * Returns a pointer to the locked pte for a guest address, or NULL
 *
 * Note: Can also be called for shadow gmaps.
 */
static pte_t *gmap_pte_op_walk(struct gmap *gmap, unsigned long gaddr,
			       spinlock_t **ptl)
{
	unsigned long *table;

	if (gmap_is_shadow(gmap))
		spin_lock(&gmap->guest_table_lock);
	/* Walk the gmap page table, lock and get pte pointer */
	table = gmap_table_walk(gmap, gaddr, 1); /* get segment pointer */
	if (!table || *table & _SEGMENT_ENTRY_INVALID) {
		if (gmap_is_shadow(gmap))
			spin_unlock(&gmap->guest_table_lock);
		return NULL;
	}
	if (gmap_is_shadow(gmap)) {
		*ptl = &gmap->guest_table_lock;
		return pte_offset_map((pmd_t *) table, gaddr);
	}
	return pte_alloc_map_lock(gmap->mm, (pmd_t *) table, gaddr, ptl);
}
/**
 * gmap_pte_op_fixup - force a page in and connect the gmap page table
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @vmaddr: address in the host process address space
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 *
 * Returns 0 if the caller can retry __gmap_translate (might fail again),
 * -ENOMEM if out of memory and -EFAULT if anything goes wrong while fixing
 * up or connecting the gmap page table.
 */
static int gmap_pte_op_fixup(struct gmap *gmap, unsigned long gaddr,
			     unsigned long vmaddr, int prot)
{
	struct mm_struct *mm = gmap->mm;
	unsigned int fault_flags;
	bool unlocked = false;

	BUG_ON(gmap_is_shadow(gmap));
	fault_flags = (prot == PROT_WRITE) ? FAULT_FLAG_WRITE : 0;
	if (fixup_user_fault(current, mm, vmaddr, fault_flags, &unlocked))
		return -EFAULT;
	if (unlocked)
		/* lost mmap_sem, caller has to retry __gmap_translate */
		return 0;
	/* Connect the page tables */
	return __gmap_link(gmap, gaddr, vmaddr);
}
/**
 * gmap_pte_op_end - release the page table lock
 * @ptl: pointer to the spinlock pointer
 */
static void gmap_pte_op_end(spinlock_t *ptl)
{
	spin_unlock(ptl);
}
/**
 * gmap_protect_range - remove access rights to memory and set pgste bits
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @len: size of area
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 * @bits: pgste notification bits to set
 *
 * Returns 0 if successfully protected, -ENOMEM if out of memory and
 * -EFAULT if gaddr is invalid (or mapping for shadows is missing).
 *
 * Called with sg->mm->mmap_sem in read.
 *
 * Note: Can also be called for shadow gmaps.
 */
static int gmap_protect_range(struct gmap *gmap, unsigned long gaddr,
			      unsigned long len, int prot, unsigned long bits)
{
	unsigned long vmaddr;
	spinlock_t *ptl;
	pte_t *ptep;
	int rc;

	while (len) {
		rc = -EAGAIN;
		ptep = gmap_pte_op_walk(gmap, gaddr, &ptl);
		if (ptep) {
			rc = ptep_force_prot(gmap->mm, gaddr, ptep, prot, bits);
			gmap_pte_op_end(ptl);
		}
		if (rc) {
			vmaddr = __gmap_translate(gmap, gaddr);
			if (IS_ERR_VALUE(vmaddr))
				return vmaddr;
			rc = gmap_pte_op_fixup(gmap, gaddr, vmaddr, prot);
			if (rc)
				return rc;
			continue;
		}
		gaddr += PAGE_SIZE;
		len -= PAGE_SIZE;
	}
	return 0;
}
/**
 * gmap_mprotect_notify - change access rights for a range of ptes and
 *			  call the notifier if any pte changes again
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @len: size of area
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 *
 * Returns 0 if for each page in the given range a gmap mapping exists,
 * the new access rights could be set and the notifier could be armed.
 * If the gmap mapping is missing for one or more pages -EFAULT is
 * returned. If no memory could be allocated -ENOMEM is returned.
 * This function establishes missing page table entries.
 */
int gmap_mprotect_notify(struct gmap *gmap, unsigned long gaddr,
			 unsigned long len, int prot)
{
	int rc;

	if ((gaddr & ~PAGE_MASK) || (len & ~PAGE_MASK) || gmap_is_shadow(gmap))
		return -EINVAL;
	if (!MACHINE_HAS_ESOP && prot == PROT_READ)
		return -EINVAL;
	down_read(&gmap->mm->mmap_sem);
	rc = gmap_protect_range(gmap, gaddr, len, prot, PGSTE_IN_BIT);
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_mprotect_notify);
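
/*
 * Illustrative sketch (not part of the original file): arming the
 * invalidation notifier for one guest page by making it read-only.
 * KVM uses this pattern to watch guest structures such as prefix pages.
 */
#if 0
static int sketch_watch_page(struct gmap *gmap, unsigned long gaddr)
{
	return gmap_mprotect_notify(gmap, gaddr & PAGE_MASK, PAGE_SIZE,
				    PROT_READ);
}
#endif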
/**
 * gmap_read_table - get an unsigned long value from a guest page table using
 *		     absolute addressing, without marking the page referenced.
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @val: pointer to the unsigned long value to return
 *
 * Returns 0 if the value was read, -ENOMEM if out of memory and -EFAULT
 * if reading using the virtual address failed.
 *
 * Called with gmap->mm->mmap_sem in read.
 */
int gmap_read_table(struct gmap *gmap, unsigned long gaddr, unsigned long *val)
{
	unsigned long address, vmaddr;
	spinlock_t *ptl;
	pte_t *ptep, pte;
	int rc;

	while (1) {
		rc = -EAGAIN;
		ptep = gmap_pte_op_walk(gmap, gaddr, &ptl);
		if (ptep) {
			pte = *ptep;
			if (pte_present(pte) && (pte_val(pte) & _PAGE_READ)) {
				address = pte_val(pte) & PAGE_MASK;
				address += gaddr & ~PAGE_MASK;
				*val = *(unsigned long *) address;
				pte_val(*ptep) |= _PAGE_YOUNG;
				/* Do *NOT* clear the _PAGE_INVALID bit! */
				rc = 0;
			}
			gmap_pte_op_end(ptl);
		}
		if (!rc)
			break;
		vmaddr = __gmap_translate(gmap, gaddr);
		if (IS_ERR_VALUE(vmaddr)) {
			rc = vmaddr;
			break;
		}
		rc = gmap_pte_op_fixup(gmap, gaddr, vmaddr, PROT_READ);
		if (rc)
			break;
	}
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_read_table);
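
/*
 * Illustrative sketch (not part of the original file): peeking at a guest
 * doubleword without marking the backing page referenced. The function
 * documents that it must run with gmap->mm->mmap_sem held in read.
 */
#if 0
static int sketch_peek_guest(struct gmap *gmap, unsigned long gaddr,
			     unsigned long *val)
{
	int rc;

	down_read(&gmap->mm->mmap_sem);
	rc = gmap_read_table(gmap, gaddr, val);
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
#endif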
/**
 * gmap_insert_rmap - add a rmap to the host_to_rmap radix tree
 * @sg: pointer to the shadow guest address space structure
 * @vmaddr: vm address associated with the rmap
 * @rmap: pointer to the rmap structure
 *
 * Called with the sg->guest_table_lock
 */
static inline void gmap_insert_rmap(struct gmap *sg, unsigned long vmaddr,
				    struct gmap_rmap *rmap)
{
	void **slot;

	BUG_ON(!gmap_is_shadow(sg));
	slot = radix_tree_lookup_slot(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT);
	if (slot) {
		rmap->next = radix_tree_deref_slot_protected(slot,
							&sg->guest_table_lock);
		radix_tree_replace_slot(slot, rmap);
	} else {
		rmap->next = NULL;
		radix_tree_insert(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT,
				  rmap);
	}
}
/**
 * gmap_protect_rmap - modify access rights to memory and create an rmap
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow gmap
 * @paddr: address in the parent guest address space
 * @len: length of the memory area to protect
 * @prot: indicates access rights: none, read-only or read-write
 *
 * Returns 0 if successfully protected and the rmap was created, -ENOMEM
 * if out of memory and -EFAULT if paddr is invalid.
 */
static int gmap_protect_rmap(struct gmap *sg, unsigned long raddr,
			     unsigned long paddr, unsigned long len, int prot)
{
	struct gmap *parent;
	struct gmap_rmap *rmap;
	unsigned long vmaddr;
	spinlock_t *ptl;
	pte_t *ptep;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	parent = sg->parent;
	while (len) {
		vmaddr = __gmap_translate(parent, paddr);
		if (IS_ERR_VALUE(vmaddr))
			return vmaddr;
		rmap = kzalloc(sizeof(*rmap), GFP_KERNEL);
		if (!rmap)
			return -ENOMEM;
		rmap->raddr = raddr;
		rc = radix_tree_preload(GFP_KERNEL);
		if (rc) {
			kfree(rmap);
			return rc;
		}
		rc = -EAGAIN;
		ptep = gmap_pte_op_walk(parent, paddr, &ptl);
		if (ptep) {
			spin_lock(&sg->guest_table_lock);
			rc = ptep_force_prot(parent->mm, paddr, ptep, prot,
					     PGSTE_VSIE_BIT);
			if (!rc)
				gmap_insert_rmap(sg, vmaddr, rmap);
			spin_unlock(&sg->guest_table_lock);
			gmap_pte_op_end(ptl);
		}
		radix_tree_preload_end();
		if (rc) {
			kfree(rmap);
			rc = gmap_pte_op_fixup(parent, paddr, vmaddr, prot);
			if (rc)
				return rc;
			continue;
		}
		paddr += PAGE_SIZE;
		len -= PAGE_SIZE;
	}
	return 0;
}
#define _SHADOW_RMAP_MASK	0x7
#define _SHADOW_RMAP_REGION1	0x5
#define _SHADOW_RMAP_REGION2	0x4
#define _SHADOW_RMAP_REGION3	0x3
#define _SHADOW_RMAP_SEGMENT	0x2
#define _SHADOW_RMAP_PGTABLE	0x1
/**
 * gmap_idte_one - invalidate a single region or segment table entry
 * @asce: region or segment table *origin* + table-type bits
 * @vaddr: virtual address to identify the table entry to flush
 *
 * The invalid bit of a single region or segment table entry is set
 * and the associated TLB entries depending on the entry are flushed.
 * The table-type of the @asce identifies the portion of the @vaddr
 * that is used as the invalidation index.
 */
static inline void gmap_idte_one(unsigned long asce, unsigned long vaddr)
{
	asm volatile(
		"	.insn	rrf,0xb98e0000,%0,%1,0,0"
		: : "a" (asce), "a" (vaddr) : "cc", "memory");
}
/**
 * gmap_unshadow_page - remove a page from a shadow page table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_page(struct gmap *sg, unsigned long raddr)
{
	unsigned long *table;

	BUG_ON(!gmap_is_shadow(sg));
	table = gmap_table_walk(sg, raddr, 0); /* get page table pointer */
	if (!table || *table & _PAGE_INVALID)
		return;
	gmap_call_notifier(sg, raddr, raddr + (1UL << 12) - 1);
	ptep_unshadow_pte(sg->mm, raddr, (pte_t *) table);
}
/**
 * __gmap_unshadow_pgt - remove all entries from a shadow page table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 * @pgt: pointer to the start of a shadow page table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_pgt(struct gmap *sg, unsigned long raddr,
				unsigned long *pgt)
{
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	for (i = 0; i < 256; i++, raddr += 1UL << 12)
		pgt[i] = _PAGE_INVALID;
}
/**
 * gmap_unshadow_pgt - remove a shadow page table from a segment entry
 * @sg: pointer to the shadow guest address space structure
 * @raddr: address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_pgt(struct gmap *sg, unsigned long raddr)
{
	unsigned long sto, *ste, *pgt;
	struct page *page;

	BUG_ON(!gmap_is_shadow(sg));
	ste = gmap_table_walk(sg, raddr, 1); /* get segment pointer */
	if (!ste || !(*ste & _SEGMENT_ENTRY_ORIGIN))
		return;
	gmap_call_notifier(sg, raddr, raddr + (1UL << 20) - 1);
	sto = (unsigned long) (ste - ((raddr >> 20) & 0x7ff));
	gmap_idte_one(sto | _ASCE_TYPE_SEGMENT, raddr);
	pgt = (unsigned long *)(*ste & _SEGMENT_ENTRY_ORIGIN);
	*ste = _SEGMENT_ENTRY_EMPTY;
	__gmap_unshadow_pgt(sg, raddr, pgt);
	/* Free page table */
	page = pfn_to_page(__pa(pgt) >> PAGE_SHIFT);
	list_del(&page->lru);
	page_table_free_pgste(page);
}
/**
 * __gmap_unshadow_sgt - remove all entries from a shadow segment table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 * @sgt: pointer to the start of a shadow segment table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr,
				unsigned long *sgt)
{
	unsigned long asce, *pgt;
	struct page *page;
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	asce = (unsigned long) sgt | _ASCE_TYPE_SEGMENT;
	for (i = 0; i < 2048; i++, raddr += 1UL << 20) {
		if (!(sgt[i] & _SEGMENT_ENTRY_ORIGIN))
			continue;
		pgt = (unsigned long *)(sgt[i] & _REGION_ENTRY_ORIGIN);
		sgt[i] = _SEGMENT_ENTRY_EMPTY;
		__gmap_unshadow_pgt(sg, raddr, pgt);
		/* Free page table */
		page = pfn_to_page(__pa(pgt) >> PAGE_SHIFT);
		list_del(&page->lru);
		page_table_free_pgste(page);
	}
}
/**
 * gmap_unshadow_sgt - remove a shadow segment table from a region-3 entry
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 *
 * Called with the shadow->guest_table_lock
 */
static void gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr)
{
	unsigned long r3o, *r3e, *sgt;
	struct page *page;

	BUG_ON(!gmap_is_shadow(sg));
	r3e = gmap_table_walk(sg, raddr, 2); /* get region-3 pointer */
	if (!r3e || !(*r3e & _REGION_ENTRY_ORIGIN))
		return;
	gmap_call_notifier(sg, raddr, raddr + (1UL << 31) - 1);
	r3o = (unsigned long) (r3e - ((raddr >> 31) & 0x7ff));
	gmap_idte_one(r3o | _ASCE_TYPE_REGION3, raddr);
	sgt = (unsigned long *)(*r3e & _REGION_ENTRY_ORIGIN);
	*r3e = _REGION3_ENTRY_EMPTY;
	__gmap_unshadow_sgt(sg, raddr, sgt);
	/* Free segment table */
	page = pfn_to_page(__pa(sgt) >> PAGE_SHIFT);
	list_del(&page->lru);
	__free_pages(page, 2);
}
/**
 * __gmap_unshadow_r3t - remove all entries from a shadow region-3 table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: address in the shadow guest address space
 * @r3t: pointer to the start of a shadow region-3 table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr,
				unsigned long *r3t)
{
	unsigned long asce, *sgt;
	struct page *page;
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	asce = (unsigned long) r3t | _ASCE_TYPE_REGION3;
	for (i = 0; i < 2048; i++, raddr += 1UL << 31) {
		if (!(r3t[i] & _REGION_ENTRY_ORIGIN))
			continue;
		sgt = (unsigned long *)(r3t[i] & _REGION_ENTRY_ORIGIN);
		r3t[i] = _REGION3_ENTRY_EMPTY;
		__gmap_unshadow_sgt(sg, raddr, sgt);
		/* Free segment table */
		page = pfn_to_page(__pa(sgt) >> PAGE_SHIFT);
		list_del(&page->lru);
		__free_pages(page, 2);
	}
}
/**
 * gmap_unshadow_r3t - remove a shadow region-3 table from a region-2 entry
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr)
{
	unsigned long r2o, *r2e, *r3t;
	struct page *page;

	BUG_ON(!gmap_is_shadow(sg));
	r2e = gmap_table_walk(sg, raddr, 3); /* get region-2 pointer */
	if (!r2e || !(*r2e & _REGION_ENTRY_ORIGIN))
		return;
	gmap_call_notifier(sg, raddr, raddr + (1UL << 42) - 1);
	r2o = (unsigned long) (r2e - ((raddr >> 42) & 0x7ff));
	gmap_idte_one(r2o | _ASCE_TYPE_REGION2, raddr);
	r3t = (unsigned long *)(*r2e & _REGION_ENTRY_ORIGIN);
	*r2e = _REGION2_ENTRY_EMPTY;
	__gmap_unshadow_r3t(sg, raddr, r3t);
	/* Free region 3 table */
	page = pfn_to_page(__pa(r3t) >> PAGE_SHIFT);
	list_del(&page->lru);
	__free_pages(page, 2);
}
/**
 * __gmap_unshadow_r2t - remove all entries from a shadow region-2 table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 * @r2t: pointer to the start of a shadow region-2 table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr,
				unsigned long *r2t)
{
	unsigned long asce, *r3t;
	struct page *page;
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	asce = (unsigned long) r2t | _ASCE_TYPE_REGION2;
	for (i = 0; i < 2048; i++, raddr += 1UL << 42) {
		if (!(r2t[i] & _REGION_ENTRY_ORIGIN))
			continue;
		r3t = (unsigned long *)(r2t[i] & _REGION_ENTRY_ORIGIN);
		r2t[i] = _REGION2_ENTRY_EMPTY;
		__gmap_unshadow_r3t(sg, raddr, r3t);
		/* Free region 3 table */
		page = pfn_to_page(__pa(r3t) >> PAGE_SHIFT);
		list_del(&page->lru);
		__free_pages(page, 2);
	}
}
/**
 * gmap_unshadow_r2t - remove a shadow region-2 table from a region-1 entry
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr)
{
	unsigned long r1o, *r1e, *r2t;
	struct page *page;

	BUG_ON(!gmap_is_shadow(sg));
	r1e = gmap_table_walk(sg, raddr, 4); /* get region-1 pointer */
	if (!r1e || !(*r1e & _REGION_ENTRY_ORIGIN))
		return;
	gmap_call_notifier(sg, raddr, raddr + (1UL << 53) - 1);
	r1o = (unsigned long) (r1e - ((raddr >> 53) & 0x7ff));
	gmap_idte_one(r1o | _ASCE_TYPE_REGION1, raddr);
	r2t = (unsigned long *)(*r1e & _REGION_ENTRY_ORIGIN);
	*r1e = _REGION1_ENTRY_EMPTY;
	__gmap_unshadow_r2t(sg, raddr, r2t);
	/* Free region 2 table */
	page = pfn_to_page(__pa(r2t) >> PAGE_SHIFT);
	list_del(&page->lru);
	__free_pages(page, 2);
}
/**
 * __gmap_unshadow_r1t - remove all entries from a shadow region-1 table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 * @r1t: pointer to the start of a shadow region-1 table
 *
 * Called with the shadow->guest_table_lock
 */
static void __gmap_unshadow_r1t(struct gmap *sg, unsigned long raddr,
				unsigned long *r1t)
{
	unsigned long asce, *r2t;
	struct page *page;
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	asce = (unsigned long) r1t | _ASCE_TYPE_REGION1;
	for (i = 0; i < 2048; i++, raddr += 1UL << 53) {
		if (!(r1t[i] & _REGION_ENTRY_ORIGIN))
			continue;
		r2t = (unsigned long *)(r1t[i] & _REGION_ENTRY_ORIGIN);
		__gmap_unshadow_r2t(sg, raddr, r2t);
		/* Clear entry and flush translation r1t -> r2t */
		gmap_idte_one(asce, raddr);
		r1t[i] = _REGION1_ENTRY_EMPTY;
		/* Free region 2 table */
		page = pfn_to_page(__pa(r2t) >> PAGE_SHIFT);
		list_del(&page->lru);
		__free_pages(page, 2);
	}
}
/**
 * gmap_unshadow - remove a shadow page table completely
 * @sg: pointer to the shadow guest address space structure
 *
 * Called with sg->guest_table_lock
 */
static void gmap_unshadow(struct gmap *sg)
{
	unsigned long *table;

	BUG_ON(!gmap_is_shadow(sg));
	if (sg->removed)
		return;
	sg->removed = 1;
	gmap_call_notifier(sg, 0, -1UL);
	gmap_flush_tlb(sg);
	table = (unsigned long *)(sg->asce & _ASCE_ORIGIN);
	switch (sg->asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_REGION1:
		__gmap_unshadow_r1t(sg, 0, table);
		break;
	case _ASCE_TYPE_REGION2:
		__gmap_unshadow_r2t(sg, 0, table);
		break;
	case _ASCE_TYPE_REGION3:
		__gmap_unshadow_r3t(sg, 0, table);
		break;
	case _ASCE_TYPE_SEGMENT:
		__gmap_unshadow_sgt(sg, 0, table);
		break;
	}
}
/**
 * gmap_find_shadow - find a specific asce in the list of shadow tables
 * @parent: pointer to the parent gmap
 * @asce: ASCE for which the shadow table is created
 * @edat_level: edat level to be used for the shadow translation
 *
 * Returns the pointer to a gmap if a shadow table with the given asce is
 * already available, ERR_PTR(-EAGAIN) if another one is just being created,
 * otherwise NULL
 */
static struct gmap *gmap_find_shadow(struct gmap *parent, unsigned long asce,
				     int edat_level)
{
	struct gmap *sg;

	list_for_each_entry(sg, &parent->children, list) {
		if (sg->orig_asce != asce || sg->edat_level != edat_level ||
		    sg->removed)
			continue;
		if (!sg->initialized)
			return ERR_PTR(-EAGAIN);
		atomic_inc(&sg->ref_count);
		return sg;
	}
	return NULL;
}
/**
 * gmap_shadow_valid - check if a shadow guest address space matches the
 *		       given properties and is still valid
 * @sg: pointer to the shadow guest address space structure
 * @asce: ASCE for which the shadow table is requested
 * @edat_level: edat level to be used for the shadow translation
 *
 * Returns 1 if the gmap shadow is still valid and matches the given
 * properties, the caller can continue using it. Returns 0 otherwise, the
 * caller has to request a new shadow gmap in this case.
 */
int gmap_shadow_valid(struct gmap *sg, unsigned long asce, int edat_level)
{
	if (sg->removed)
		return 0;
	return sg->orig_asce == asce && sg->edat_level == edat_level;
}
EXPORT_SYMBOL_GPL(gmap_shadow_valid);
/**
 * gmap_shadow - create/find a shadow guest address space
 * @parent: pointer to the parent gmap
 * @asce: ASCE for which the shadow table is created
 * @edat_level: edat level to be used for the shadow translation
 *
 * The pages of the top level page table referred by the asce parameter
 * will be set to read-only and marked in the PGSTEs of the kvm process.
 * The shadow table will be removed automatically on any change to the
 * PTE mapping for the source table.
 *
 * Returns a guest address space structure, ERR_PTR(-ENOMEM) if out of memory,
 * ERR_PTR(-EAGAIN) if the caller has to retry and ERR_PTR(-EFAULT) if the
 * parent gmap table could not be protected.
 */
struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce,
			 int edat_level)
{
	struct gmap *sg, *new;
	unsigned long limit;
	int rc;

	BUG_ON(gmap_is_shadow(parent));
	spin_lock(&parent->shadow_lock);
	sg = gmap_find_shadow(parent, asce, edat_level);
	spin_unlock(&parent->shadow_lock);
	if (sg)
		return sg;
	/* Create a new shadow gmap */
	limit = -1UL >> (33 - (((asce & _ASCE_TYPE_MASK) >> 2) * 11));
	if (asce & _ASCE_REAL_SPACE)
		limit = -1UL;
	new = gmap_alloc(limit);
	if (!new)
		return ERR_PTR(-ENOMEM);
	new->mm = parent->mm;
	new->parent = gmap_get(parent);
	new->orig_asce = asce;
	new->edat_level = edat_level;
	new->initialized = false;
	spin_lock(&parent->shadow_lock);
	/* Recheck if another CPU created the same shadow */
	sg = gmap_find_shadow(parent, asce, edat_level);
	if (sg) {
		spin_unlock(&parent->shadow_lock);
		gmap_free(new);
		return sg;
	}
	if (asce & _ASCE_REAL_SPACE) {
		/* only allow one real-space gmap shadow */
		list_for_each_entry(sg, &parent->children, list) {
			if (sg->orig_asce & _ASCE_REAL_SPACE) {
				spin_lock(&sg->guest_table_lock);
				gmap_unshadow(sg);
				spin_unlock(&sg->guest_table_lock);
				list_del(&sg->list);
				gmap_put(sg);
				break;
			}
		}
	}
	atomic_set(&new->ref_count, 2);
	list_add(&new->list, &parent->children);
	if (asce & _ASCE_REAL_SPACE) {
		/* nothing to protect, return right away */
		new->initialized = true;
		spin_unlock(&parent->shadow_lock);
		return new;
	}
	spin_unlock(&parent->shadow_lock);
	/* protect after insertion, so it will get properly invalidated */
	down_read(&parent->mm->mmap_sem);
	rc = gmap_protect_range(parent, asce & _ASCE_ORIGIN,
				((asce & _ASCE_TABLE_LENGTH) + 1) * 4096,
				PROT_READ, PGSTE_VSIE_BIT);
	up_read(&parent->mm->mmap_sem);
	spin_lock(&parent->shadow_lock);
	new->initialized = true;
	if (rc) {
		list_del(&new->list);
		gmap_free(new);
		new = ERR_PTR(rc);
	}
	spin_unlock(&parent->shadow_lock);
	return new;
}
EXPORT_SYMBOL_GPL(gmap_shadow);
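
/*
 * Illustrative sketch (not part of the original file): obtaining a shadow
 * gmap for a nested (VSIE) guest and retrying on the documented -EAGAIN.
 */
#if 0
static struct gmap *sketch_get_shadow(struct gmap *parent, unsigned long asce,
				      int edat_level)
{
	struct gmap *sg;

	do {
		sg = gmap_shadow(parent, asce, edat_level);
	} while (sg == ERR_PTR(-EAGAIN));
	return sg;	/* valid pointer or ERR_PTR(-ENOMEM/-EFAULT) */
}
#endif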
/**
 * gmap_shadow_r2t - create an empty shadow region 2 table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @r2t: parent gmap address of the region 2 table to get shadowed
 * @fake: r2t references contiguous guest memory block, not a r2t
 *
 * The r2t parameter specifies the address of the source table. The
 * four pages of the source table are made read-only in the parent gmap
 * address space. A write to the source table area @r2t will automatically
 * remove the shadow r2 table and all of its descendants.
 *
 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t,
		    int fake)
{
	unsigned long raddr, origin, offset, len;
	unsigned long *s_r2t, *table;
	struct page *page;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	/* Allocate a shadow region second table */
	page = alloc_pages(GFP_KERNEL, 2);
	if (!page)
		return -ENOMEM;
	page->index = r2t & _REGION_ENTRY_ORIGIN;
	if (fake)
		page->index |= GMAP_SHADOW_FAKE_TABLE;
	s_r2t = (unsigned long *) page_to_phys(page);
	/* Install shadow region second table */
	spin_lock(&sg->guest_table_lock);
	table = gmap_table_walk(sg, saddr, 4); /* get region-1 pointer */
	if (!table) {
		rc = -EAGAIN;		/* Race with unshadow */
		goto out_free;
	}
	if (!(*table & _REGION_ENTRY_INVALID)) {
		rc = 0;			/* Already established */
		goto out_free;
	} else if (*table & _REGION_ENTRY_ORIGIN) {
		rc = -EAGAIN;		/* Race with shadow */
		goto out_free;
	}
	crst_table_init(s_r2t, _REGION2_ENTRY_EMPTY);
	/* mark as invalid as long as the parent table is not protected */
	*table = (unsigned long) s_r2t | _REGION_ENTRY_LENGTH |
		 _REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID;
	if (sg->edat_level >= 1)
		*table |= (r2t & _REGION_ENTRY_PROTECT);
	list_add(&page->lru, &sg->crst_list);
	if (fake) {
		/* nothing to protect for fake tables */
		*table &= ~_REGION_ENTRY_INVALID;
		spin_unlock(&sg->guest_table_lock);
		return 0;
	}
	spin_unlock(&sg->guest_table_lock);
	/* Make r2t read-only in parent gmap page table */
	raddr = (saddr & 0xffe0000000000000UL) | _SHADOW_RMAP_REGION1;
	origin = r2t & _REGION_ENTRY_ORIGIN;
	offset = ((r2t & _REGION_ENTRY_OFFSET) >> 6) * 4096;
	len = ((r2t & _REGION_ENTRY_LENGTH) + 1) * 4096 - offset;
	rc = gmap_protect_rmap(sg, raddr, origin + offset, len, PROT_READ);
	spin_lock(&sg->guest_table_lock);
	if (!rc) {
		table = gmap_table_walk(sg, saddr, 4);
		if (!table || (*table & _REGION_ENTRY_ORIGIN) !=
			      (unsigned long) s_r2t)
			rc = -EAGAIN;		/* Race with unshadow */
		else
			*table &= ~_REGION_ENTRY_INVALID;
	} else {
		gmap_unshadow_r2t(sg, raddr);
	}
	spin_unlock(&sg->guest_table_lock);
	return rc;
out_free:
	spin_unlock(&sg->guest_table_lock);
	__free_pages(page, 2);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_r2t);
/**
 * gmap_shadow_r3t - create a shadow region 3 table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @r3t: parent gmap address of the region 3 table to get shadowed
 * @fake: r3t references contiguous guest memory block, not a r3t
 *
 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t,
		    int fake)
{
	unsigned long raddr, origin, offset, len;
	unsigned long *s_r3t, *table;
	struct page *page;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	/* Allocate a shadow region third table */
	page = alloc_pages(GFP_KERNEL, 2);
	if (!page)
		return -ENOMEM;
	page->index = r3t & _REGION_ENTRY_ORIGIN;
	if (fake)
		page->index |= GMAP_SHADOW_FAKE_TABLE;
	s_r3t = (unsigned long *) page_to_phys(page);
	/* Install shadow region third table */
	spin_lock(&sg->guest_table_lock);
	table = gmap_table_walk(sg, saddr, 3); /* get region-2 pointer */
	if (!table) {
		rc = -EAGAIN;		/* Race with unshadow */
		goto out_free;
	}
	if (!(*table & _REGION_ENTRY_INVALID)) {
		rc = 0;			/* Already established */
		goto out_free;
	} else if (*table & _REGION_ENTRY_ORIGIN) {
		rc = -EAGAIN;		/* Race with shadow */
		goto out_free;
	}
	crst_table_init(s_r3t, _REGION3_ENTRY_EMPTY);
	/* mark as invalid as long as the parent table is not protected */
	*table = (unsigned long) s_r3t | _REGION_ENTRY_LENGTH |
		 _REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID;
	if (sg->edat_level >= 1)
		*table |= (r3t & _REGION_ENTRY_PROTECT);
	list_add(&page->lru, &sg->crst_list);
	if (fake) {
		/* nothing to protect for fake tables */
		*table &= ~_REGION_ENTRY_INVALID;
		spin_unlock(&sg->guest_table_lock);
		return 0;
	}
	spin_unlock(&sg->guest_table_lock);
	/* Make r3t read-only in parent gmap page table */
	raddr = (saddr & 0xfffffc0000000000UL) | _SHADOW_RMAP_REGION2;
	origin = r3t & _REGION_ENTRY_ORIGIN;
	offset = ((r3t & _REGION_ENTRY_OFFSET) >> 6) * 4096;
	len = ((r3t & _REGION_ENTRY_LENGTH) + 1) * 4096 - offset;
	rc = gmap_protect_rmap(sg, raddr, origin + offset, len, PROT_READ);
	spin_lock(&sg->guest_table_lock);
	if (!rc) {
		table = gmap_table_walk(sg, saddr, 3);
		if (!table || (*table & _REGION_ENTRY_ORIGIN) !=
			      (unsigned long) s_r3t)
			rc = -EAGAIN;		/* Race with unshadow */
		else
			*table &= ~_REGION_ENTRY_INVALID;
	} else {
		gmap_unshadow_r3t(sg, raddr);
	}
	spin_unlock(&sg->guest_table_lock);
	return rc;
out_free:
	spin_unlock(&sg->guest_table_lock);
	__free_pages(page, 2);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_r3t);
/**
 * gmap_shadow_sgt - create a shadow segment table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @sgt: parent gmap address of the segment table to get shadowed
 * @fake: sgt references contiguous guest memory block, not a sgt
 *
 * Returns: 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt,
		    int fake)
{
	unsigned long raddr, origin, offset, len;
	unsigned long *s_sgt, *table;
	struct page *page;
	int rc;

	BUG_ON(!gmap_is_shadow(sg) || (sgt & _REGION3_ENTRY_LARGE));
	/* Allocate a shadow segment table */
	page = alloc_pages(GFP_KERNEL, 2);
	if (!page)
		return -ENOMEM;
	page->index = sgt & _REGION_ENTRY_ORIGIN;
	if (fake)
		page->index |= GMAP_SHADOW_FAKE_TABLE;
	s_sgt = (unsigned long *) page_to_phys(page);
	/* Install shadow segment table */
	spin_lock(&sg->guest_table_lock);
	table = gmap_table_walk(sg, saddr, 2); /* get region-3 pointer */
	if (!table) {
		rc = -EAGAIN;		/* Race with unshadow */
		goto out_free;
	}
	if (!(*table & _REGION_ENTRY_INVALID)) {
		rc = 0;			/* Already established */
		goto out_free;
	} else if (*table & _REGION_ENTRY_ORIGIN) {
		rc = -EAGAIN;		/* Race with shadow */
		goto out_free;
	}
	crst_table_init(s_sgt, _SEGMENT_ENTRY_EMPTY);
	/* mark as invalid as long as the parent table is not protected */
	*table = (unsigned long) s_sgt | _REGION_ENTRY_LENGTH |
		 _REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID;
	if (sg->edat_level >= 1)
		*table |= sgt & _REGION_ENTRY_PROTECT;
	list_add(&page->lru, &sg->crst_list);
	if (fake) {
		/* nothing to protect for fake tables */
		*table &= ~_REGION_ENTRY_INVALID;
		spin_unlock(&sg->guest_table_lock);
		return 0;
	}
	spin_unlock(&sg->guest_table_lock);
	/* Make sgt read-only in parent gmap page table */
	raddr = (saddr & 0xffffffff80000000UL) | _SHADOW_RMAP_REGION3;
	origin = sgt & _REGION_ENTRY_ORIGIN;
	offset = ((sgt & _REGION_ENTRY_OFFSET) >> 6) * 4096;
	len = ((sgt & _REGION_ENTRY_LENGTH) + 1) * 4096 - offset;
	rc = gmap_protect_rmap(sg, raddr, origin + offset, len, PROT_READ);
	spin_lock(&sg->guest_table_lock);
	if (!rc) {
		table = gmap_table_walk(sg, saddr, 2);
		if (!table || (*table & _REGION_ENTRY_ORIGIN) !=
			      (unsigned long) s_sgt)
			rc = -EAGAIN;		/* Race with unshadow */
		else
			*table &= ~_REGION_ENTRY_INVALID;
	} else {
		gmap_unshadow_sgt(sg, raddr);
	}
	spin_unlock(&sg->guest_table_lock);
	return rc;
out_free:
	spin_unlock(&sg->guest_table_lock);
	__free_pages(page, 2);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_sgt);
/**
 * gmap_shadow_pgt_lookup - find a shadow page table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: the address in the shadow guest address space
 * @pgt: parent gmap address of the page table to get shadowed
 * @dat_protection: if the pgtable is marked as protected by dat
 * @fake: pgt references contiguous guest memory block, not a pgtable
 *
 * Returns 0 if the shadow page table was found and -EAGAIN if the page
 * table was not found.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_pgt_lookup(struct gmap *sg, unsigned long saddr,
			   unsigned long *pgt, int *dat_protection,
			   int *fake)
{
	unsigned long *table;
	struct page *page;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	spin_lock(&sg->guest_table_lock);
	table = gmap_table_walk(sg, saddr, 1); /* get segment pointer */
	if (table && !(*table & _SEGMENT_ENTRY_INVALID)) {
		/* Shadow page tables are full pages (pte+pgste) */
		page = pfn_to_page(*table >> PAGE_SHIFT);
		*pgt = page->index & ~GMAP_SHADOW_FAKE_TABLE;
		*dat_protection = !!(*table & _SEGMENT_ENTRY_PROTECT);
		*fake = !!(page->index & GMAP_SHADOW_FAKE_TABLE);
		rc = 0;
	} else {
		rc = -EAGAIN;
	}
	spin_unlock(&sg->guest_table_lock);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_pgt_lookup);
/**
 * gmap_shadow_pgt - instantiate a shadow page table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @pgt: parent gmap address of the page table to get shadowed
 * @fake: pgt references contiguous guest memory block, not a pgtable
 *
 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with gmap->mm->mmap_sem in read
 */
int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt,
		    int fake)
{
	unsigned long raddr, origin;
	unsigned long *s_pgt, *table;
	struct page *page;
	int rc;

	BUG_ON(!gmap_is_shadow(sg) || (pgt & _SEGMENT_ENTRY_LARGE));
	/* Allocate a shadow page table */
	page = page_table_alloc_pgste(sg->mm);
	if (!page)
		return -ENOMEM;
	page->index = pgt & _SEGMENT_ENTRY_ORIGIN;
	if (fake)
		page->index |= GMAP_SHADOW_FAKE_TABLE;
	s_pgt = (unsigned long *) page_to_phys(page);
	/* Install shadow page table */
	spin_lock(&sg->guest_table_lock);
	table = gmap_table_walk(sg, saddr, 1); /* get segment pointer */
	if (!table) {
		rc = -EAGAIN;		/* Race with unshadow */
		goto out_free;
	}
	if (!(*table & _SEGMENT_ENTRY_INVALID)) {
		rc = 0;			/* Already established */
		goto out_free;
	} else if (*table & _SEGMENT_ENTRY_ORIGIN) {
		rc = -EAGAIN;		/* Race with shadow */
		goto out_free;
	}
	/* mark as invalid as long as the parent table is not protected */
	*table = (unsigned long) s_pgt | _SEGMENT_ENTRY |
		 (pgt & _SEGMENT_ENTRY_PROTECT) | _SEGMENT_ENTRY_INVALID;
	list_add(&page->lru, &sg->pt_list);
	if (fake) {
		/* nothing to protect for fake tables */
		*table &= ~_SEGMENT_ENTRY_INVALID;
		spin_unlock(&sg->guest_table_lock);
		return 0;
	}
	spin_unlock(&sg->guest_table_lock);
	/* Make pgt read-only in parent gmap page table (not the pgste) */
	raddr = (saddr & 0xfffffffffff00000UL) | _SHADOW_RMAP_SEGMENT;
	origin = pgt & _SEGMENT_ENTRY_ORIGIN & PAGE_MASK;
	rc = gmap_protect_rmap(sg, raddr, origin, PAGE_SIZE, PROT_READ);
	spin_lock(&sg->guest_table_lock);
	if (!rc) {
		table = gmap_table_walk(sg, saddr, 1);
		if (!table || (*table & _SEGMENT_ENTRY_ORIGIN) !=
			      (unsigned long) s_pgt)
			rc = -EAGAIN;		/* Race with unshadow */
		else
			*table &= ~_SEGMENT_ENTRY_INVALID;
	} else {
		gmap_unshadow_pgt(sg, raddr);
	}
	spin_unlock(&sg->guest_table_lock);
	return rc;
out_free:
	spin_unlock(&sg->guest_table_lock);
	page_table_free_pgste(page);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_pgt);
/**
 * gmap_shadow_page - create a shadow page mapping
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @pte: pte in parent gmap address space to get shadowed
 *
 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_page(struct gmap *sg, unsigned long saddr, pte_t pte)
{
	struct gmap *parent;
	struct gmap_rmap *rmap;
	unsigned long vmaddr, paddr;
	spinlock_t *ptl;
	pte_t *sptep, *tptep;
	int prot;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	parent = sg->parent;
	prot = (pte_val(pte) & _PAGE_PROTECT) ? PROT_READ : PROT_WRITE;

	rmap = kzalloc(sizeof(*rmap), GFP_KERNEL);
	if (!rmap)
		return -ENOMEM;
	rmap->raddr = (saddr & PAGE_MASK) | _SHADOW_RMAP_PGTABLE;

	while (1) {
		paddr = pte_val(pte) & PAGE_MASK;
		vmaddr = __gmap_translate(parent, paddr);
		if (IS_ERR_VALUE(vmaddr)) {
			rc = vmaddr;
			break;
		}
		rc = radix_tree_preload(GFP_KERNEL);
		if (rc)
			break;
		rc = -EAGAIN;
		sptep = gmap_pte_op_walk(parent, paddr, &ptl);
		if (sptep) {
			spin_lock(&sg->guest_table_lock);
			/* Get page table pointer */
			tptep = (pte_t *) gmap_table_walk(sg, saddr, 0);
			if (!tptep) {
				spin_unlock(&sg->guest_table_lock);
				gmap_pte_op_end(ptl);
				radix_tree_preload_end();
				break;
			}
			rc = ptep_shadow_pte(sg->mm, saddr, sptep, tptep, pte);
			if (rc > 0) {
				/* Success and a new mapping */
				gmap_insert_rmap(sg, vmaddr, rmap);
				rmap = NULL;
				rc = 0;
			}
			gmap_pte_op_end(ptl);
			spin_unlock(&sg->guest_table_lock);
		}
		radix_tree_preload_end();
		if (!rc)
			break;
		rc = gmap_pte_op_fixup(parent, paddr, vmaddr, prot);
		if (rc)
			break;
	}
	kfree(rmap);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_page);
/**
 * gmap_shadow_notify - handle notifications for shadow gmap
 *
 * Called with sg->parent->shadow_lock.
 */
static void gmap_shadow_notify(struct gmap *sg, unsigned long vmaddr,
			       unsigned long offset, pte_t *pte)
{
	struct gmap_rmap *rmap, *rnext, *head;
	unsigned long gaddr, start, end, bits, raddr;
	unsigned long *table;

	BUG_ON(!gmap_is_shadow(sg));
	spin_lock(&sg->parent->guest_table_lock);
	table = radix_tree_lookup(&sg->parent->host_to_guest,
				  vmaddr >> PMD_SHIFT);
	gaddr = table ? __gmap_segment_gaddr(table) + offset : 0;
	spin_unlock(&sg->parent->guest_table_lock);
	if (!table)
		return;

	spin_lock(&sg->guest_table_lock);
	if (sg->removed) {
		spin_unlock(&sg->guest_table_lock);
		return;
	}
	/* Check for top level table */
	start = sg->orig_asce & _ASCE_ORIGIN;
	end = start + ((sg->orig_asce & _ASCE_TABLE_LENGTH) + 1) * 4096;
	if (!(sg->orig_asce & _ASCE_REAL_SPACE) && gaddr >= start &&
	    gaddr < end) {
		/* The complete shadow table has to go */
		gmap_unshadow(sg);
		spin_unlock(&sg->guest_table_lock);
		list_del(&sg->list);
		gmap_put(sg);
		return;
	}
	/* Remove the page table tree for one specific entry */
	head = radix_tree_delete(&sg->host_to_rmap, vmaddr >> 12);
	gmap_for_each_rmap_safe(rmap, rnext, head) {
		bits = rmap->raddr & _SHADOW_RMAP_MASK;
		raddr = rmap->raddr ^ bits;
		switch (bits) {
		case _SHADOW_RMAP_REGION1:
			gmap_unshadow_r2t(sg, raddr);
			break;
		case _SHADOW_RMAP_REGION2:
			gmap_unshadow_r3t(sg, raddr);
			break;
		case _SHADOW_RMAP_REGION3:
			gmap_unshadow_sgt(sg, raddr);
			break;
		case _SHADOW_RMAP_SEGMENT:
			gmap_unshadow_pgt(sg, raddr);
			break;
		case _SHADOW_RMAP_PGTABLE:
			gmap_unshadow_page(sg, raddr);
			break;
		}
		kfree(rmap);
	}
	spin_unlock(&sg->guest_table_lock);
}
/**
 * ptep_notify - call all invalidation callbacks for a specific pte.
 * @mm: pointer to the process mm_struct
 * @vmaddr: virtual address in the process address space
 * @pte: pointer to the page table entry
 * @bits: bits from the pgste that caused the notify call
 *
 * This function is assumed to be called with the page table lock held
 * for the pte to notify.
 */
void ptep_notify(struct mm_struct *mm, unsigned long vmaddr,
		 pte_t *pte, unsigned long bits)
{
	unsigned long offset, gaddr;
	unsigned long *table;
	struct gmap *gmap, *sg, *next;

	offset = ((unsigned long) pte) & (255 * sizeof(pte_t));
	offset = offset * (4096 / sizeof(pte_t));
	rcu_read_lock();
	list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
		if (!list_empty(&gmap->children) && (bits & PGSTE_VSIE_BIT)) {
			spin_lock(&gmap->shadow_lock);
			list_for_each_entry_safe(sg, next,
						 &gmap->children, list)
				gmap_shadow_notify(sg, vmaddr, offset, pte);
			spin_unlock(&gmap->shadow_lock);
		}
		if (!(bits & PGSTE_IN_BIT))
			continue;
		spin_lock(&gmap->guest_table_lock);
		table = radix_tree_lookup(&gmap->host_to_guest,
					  vmaddr >> PMD_SHIFT);
		if (table)
			gaddr = __gmap_segment_gaddr(table) + offset;
		spin_unlock(&gmap->guest_table_lock);
		if (!table)
			continue;
		gmap_call_notifier(gmap, gaddr, gaddr + PAGE_SIZE - 1);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(ptep_notify);
static inline void thp_split_mm(struct mm_struct *mm)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	struct vm_area_struct *vma;
	unsigned long addr;

	for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) {
		for (addr = vma->vm_start;
		     addr < vma->vm_end;
		     addr += PAGE_SIZE)
			follow_page(vma, addr, FOLL_SPLIT);
		vma->vm_flags &= ~VM_HUGEPAGE;
		vma->vm_flags |= VM_NOHUGEPAGE;
	}
	mm->def_flags |= VM_NOHUGEPAGE;
#endif
}
/*
 * switch on pgstes for its userspace process (for kvm)
 */
int s390_enable_sie(void)
{
	struct mm_struct *mm = current->mm;

	/* Do we have pgstes? if yes, we are done */
	if (mm_has_pgste(mm))
		return 0;
	/* Fail if the page tables are 2K */
	if (!mm_alloc_pgste(mm))
		return -EINVAL;
	down_write(&mm->mmap_sem);
	mm->context.has_pgste = 1;
	/* split thp mappings and disable thp for future mappings */
	thp_split_mm(mm);
	up_write(&mm->mmap_sem);
	return 0;
}
EXPORT_SYMBOL_GPL(s390_enable_sie);
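
/*
 * Illustrative sketch (not part of the original file): s390_enable_sie()
 * is called once per VM creation, before the first gmap is set up. The
 * helper name and limit value are assumptions for illustration only.
 */
#if 0
static int sketch_vm_init(struct mm_struct *mm, struct gmap **gmapp)
{
	if (s390_enable_sie())
		return -EINVAL;	/* 2K page tables, no pgstes possible */
	*gmapp = gmap_create(mm, (1UL << 44) - 1);
	return *gmapp ? 0 : -ENOMEM;
}
#endif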
/*
 * Enable storage key handling from now on and initialize the storage
 * keys with the default key.
 */
static int __s390_enable_skey(pte_t *pte, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	/*
	 * Remove all zero page mappings,
	 * after establishing a policy to forbid zero page mappings
	 * following faults for that page will get fresh anonymous pages
	 */
	if (is_zero_pfn(pte_pfn(*pte)))
		ptep_xchg_direct(walk->mm, addr, pte, __pte(_PAGE_INVALID));
	/* Clear storage key */
	ptep_zap_key(walk->mm, addr, pte);
	return 0;
}
int s390_enable_skey(void)
{
	struct mm_walk walk = { .pte_entry = __s390_enable_skey };
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int rc = 0;

	down_write(&mm->mmap_sem);
	if (mm_use_skey(mm))
		goto out_up;

	mm->context.use_skey = 1;
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (ksm_madvise(vma, vma->vm_start, vma->vm_end,
				MADV_UNMERGEABLE, &vma->vm_flags)) {
			mm->context.use_skey = 0;
			rc = -ENOMEM;
			goto out_up;
		}
	}
	mm->def_flags &= ~VM_MERGEABLE;

	walk.mm = mm;
	walk_page_range(0, TASK_SIZE, &walk);

out_up:
	up_write(&mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(s390_enable_skey);
/*
 * Reset CMMA state, make all pages stable again.
 */
static int __s390_reset_cmma(pte_t *pte, unsigned long addr,
			     unsigned long next, struct mm_walk *walk)
{
	ptep_zap_unused(walk->mm, addr, pte, 1);
	return 0;
}

void s390_reset_cmma(struct mm_struct *mm)
{
	struct mm_walk walk = { .pte_entry = __s390_reset_cmma };

	down_write(&mm->mmap_sem);
	walk.mm = mm;
	walk_page_range(0, TASK_SIZE, &walk);
	up_write(&mm->mmap_sem);
}
EXPORT_SYMBOL_GPL(s390_reset_cmma);