/*
 * KVM guest address space mapping code
 *
 * Copyright IBM Corp. 2007, 2016
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/swapops.h>
#include <linux/ksm.h>
#include <linux/mman.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/tlb.h>

#define GMAP_SHADOW_FAKE_TABLE 1ULL
/**
 * gmap_alloc - allocate and initialize a guest address space
 * @limit: maximum address of the gmap address space
 *
 * Returns a guest address space structure.
 */
static struct gmap *gmap_alloc(unsigned long limit)
{
	struct gmap *gmap;
	struct page *page;
	unsigned long *table;
	unsigned long etype, atype;

	if (limit < _REGION3_SIZE) {
		limit = _REGION3_SIZE - 1;
		atype = _ASCE_TYPE_SEGMENT;
		etype = _SEGMENT_ENTRY_EMPTY;
	} else if (limit < _REGION2_SIZE) {
		limit = _REGION2_SIZE - 1;
		atype = _ASCE_TYPE_REGION3;
		etype = _REGION3_ENTRY_EMPTY;
	} else if (limit < _REGION1_SIZE) {
		limit = _REGION1_SIZE - 1;
		atype = _ASCE_TYPE_REGION2;
		etype = _REGION2_ENTRY_EMPTY;
	} else {
		limit = -1UL;
		atype = _ASCE_TYPE_REGION1;
		etype = _REGION1_ENTRY_EMPTY;
	}
	gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL);
	if (!gmap)
		goto out;
	INIT_LIST_HEAD(&gmap->crst_list);
	INIT_LIST_HEAD(&gmap->children);
	INIT_LIST_HEAD(&gmap->pt_list);
	INIT_RADIX_TREE(&gmap->guest_to_host, GFP_KERNEL);
	INIT_RADIX_TREE(&gmap->host_to_guest, GFP_ATOMIC);
	INIT_RADIX_TREE(&gmap->host_to_rmap, GFP_ATOMIC);
	spin_lock_init(&gmap->guest_table_lock);
	spin_lock_init(&gmap->shadow_lock);
	atomic_set(&gmap->ref_count, 1);
	page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
	if (!page)
		goto out_free;
	page->index = 0;
	list_add(&page->lru, &gmap->crst_list);
	table = (unsigned long *) page_to_phys(page);
	crst_table_init(table, etype);
	gmap->table = table;
	gmap->asce = atype | _ASCE_TABLE_LENGTH |
		_ASCE_USER_BITS | __pa(table);
	gmap->asce_end = limit;
	return gmap;

out_free:
	kfree(gmap);
out:
	return NULL;
}
/**
 * gmap_create - create a guest address space
 * @mm: pointer to the parent mm_struct
 * @limit: maximum size of the gmap address space
 *
 * Returns a guest address space structure.
 */
struct gmap *gmap_create(struct mm_struct *mm, unsigned long limit)
{
	struct gmap *gmap;
	unsigned long gmap_asce;

	gmap = gmap_alloc(limit);
	if (!gmap)
		return NULL;
	gmap->mm = mm;
	spin_lock(&mm->context.lock);
	list_add_rcu(&gmap->list, &mm->context.gmap_list);
	if (list_is_singular(&mm->context.gmap_list))
		gmap_asce = gmap->asce;
	else
		gmap_asce = -1UL;
	WRITE_ONCE(mm->context.gmap_asce, gmap_asce);
	spin_unlock(&mm->context.lock);
	return gmap;
}
EXPORT_SYMBOL_GPL(gmap_create);
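
/*
 * Usage sketch (illustrative only, not part of this file): a hypervisor
 * caller would typically pair gmap_create() with gmap_remove(), e.g.:
 *
 *	struct gmap *g = gmap_create(current->mm, (1UL << 42) - 1);
 *	if (!g)
 *		return -ENOMEM;
 *	...
 *	gmap_remove(g);
 */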
static void gmap_flush_tlb(struct gmap *gmap)
{
	if (MACHINE_HAS_IDTE)
		__tlb_flush_idte(gmap->asce);
	else
		__tlb_flush_global();
}
static void gmap_radix_tree_free(struct radix_tree_root *root)
{
	struct radix_tree_iter iter;
	unsigned long indices[16];
	unsigned long index;
	void __rcu **slot;
	int i, nr;

	/* A radix tree is freed by deleting all of its entries */
	index = 0;
	do {
		nr = 0;
		radix_tree_for_each_slot(slot, root, &iter, index) {
			indices[nr] = iter.index;
			if (++nr == 16)
				break;
		}
		for (i = 0; i < nr; i++) {
			index = indices[i];
			radix_tree_delete(root, index);
		}
	} while (nr > 0);
}
static void gmap_rmap_radix_tree_free(struct radix_tree_root *root)
{
	struct gmap_rmap *rmap, *rnext, *head;
	struct radix_tree_iter iter;
	unsigned long indices[16];
	unsigned long index;
	void __rcu **slot;
	int i, nr;

	/* A radix tree is freed by deleting all of its entries */
	index = 0;
	do {
		nr = 0;
		radix_tree_for_each_slot(slot, root, &iter, index) {
			indices[nr] = iter.index;
			if (++nr == 16)
				break;
		}
		for (i = 0; i < nr; i++) {
			index = indices[i];
			head = radix_tree_delete(root, index);
			gmap_for_each_rmap_safe(rmap, rnext, head)
				kfree(rmap);
		}
	} while (nr > 0);
}
/**
 * gmap_free - free a guest address space
 * @gmap: pointer to the guest address space structure
 *
 * No locks required. There are no references to this gmap anymore.
 */
static void gmap_free(struct gmap *gmap)
{
	struct page *page, *next;

	/* Flush tlb of all gmaps (if not already done for shadows) */
	if (!(gmap_is_shadow(gmap) && gmap->removed))
		gmap_flush_tlb(gmap);
	/* Free all segment & region tables. */
	list_for_each_entry_safe(page, next, &gmap->crst_list, lru)
		__free_pages(page, CRST_ALLOC_ORDER);
	gmap_radix_tree_free(&gmap->guest_to_host);
	gmap_radix_tree_free(&gmap->host_to_guest);

	/* Free additional data for a shadow gmap */
	if (gmap_is_shadow(gmap)) {
		/* Free all page tables. */
		list_for_each_entry_safe(page, next, &gmap->pt_list, lru)
			page_table_free_pgste(page);
		gmap_rmap_radix_tree_free(&gmap->host_to_rmap);
		/* Release reference to the parent */
		gmap_put(gmap->parent);
	}

	kfree(gmap);
}
/**
 * gmap_get - increase reference counter for guest address space
 * @gmap: pointer to the guest address space structure
 *
 * Returns the gmap pointer
 */
struct gmap *gmap_get(struct gmap *gmap)
{
	atomic_inc(&gmap->ref_count);
	return gmap;
}
EXPORT_SYMBOL_GPL(gmap_get);
/**
 * gmap_put - decrease reference counter for guest address space
 * @gmap: pointer to the guest address space structure
 *
 * If the reference counter reaches zero the guest address space is freed.
 */
void gmap_put(struct gmap *gmap)
{
	if (atomic_dec_return(&gmap->ref_count) == 0)
		gmap_free(gmap);
}
EXPORT_SYMBOL_GPL(gmap_put);
/**
 * gmap_remove - remove a guest address space but do not free it yet
 * @gmap: pointer to the guest address space structure
 */
void gmap_remove(struct gmap *gmap)
{
	struct gmap *sg, *next;
	unsigned long gmap_asce;

	/* Remove all shadow gmaps linked to this gmap */
	if (!list_empty(&gmap->children)) {
		spin_lock(&gmap->shadow_lock);
		list_for_each_entry_safe(sg, next, &gmap->children, list) {
			list_del(&sg->list);
			gmap_put(sg);
		}
		spin_unlock(&gmap->shadow_lock);
	}
	/* Remove gmap from the per-mm list */
	spin_lock(&gmap->mm->context.lock);
	list_del_rcu(&gmap->list);
	if (list_empty(&gmap->mm->context.gmap_list))
		gmap_asce = 0;
	else if (list_is_singular(&gmap->mm->context.gmap_list))
		gmap_asce = list_first_entry(&gmap->mm->context.gmap_list,
					     struct gmap, list)->asce;
	else
		gmap_asce = -1UL;
	WRITE_ONCE(gmap->mm->context.gmap_asce, gmap_asce);
	spin_unlock(&gmap->mm->context.lock);
	synchronize_rcu();
	/* Put reference */
	gmap_put(gmap);
}
EXPORT_SYMBOL_GPL(gmap_remove);
/**
 * gmap_enable - switch primary space to the guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_enable(struct gmap *gmap)
{
	S390_lowcore.gmap = (unsigned long) gmap;
}
EXPORT_SYMBOL_GPL(gmap_enable);
/**
 * gmap_disable - switch back to the standard primary address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_disable(struct gmap *gmap)
{
	S390_lowcore.gmap = 0UL;
}
EXPORT_SYMBOL_GPL(gmap_disable);
/**
 * gmap_get_enabled - get a pointer to the currently enabled gmap
 *
 * Returns a pointer to the currently enabled gmap. 0 if none is enabled.
 */
struct gmap *gmap_get_enabled(void)
{
	return (struct gmap *) S390_lowcore.gmap;
}
EXPORT_SYMBOL_GPL(gmap_get_enabled);
/*
 * gmap_alloc_table is assumed to be called with mmap_sem held
 */
static int gmap_alloc_table(struct gmap *gmap, unsigned long *table,
			    unsigned long init, unsigned long gaddr)
{
	struct page *page;
	unsigned long *new;

	/* since we don't free the gmap table until gmap_free we can unlock */
	page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
	if (!page)
		return -ENOMEM;
	new = (unsigned long *) page_to_phys(page);
	crst_table_init(new, init);
	spin_lock(&gmap->guest_table_lock);
	if (*table & _REGION_ENTRY_INVALID) {
		list_add(&page->lru, &gmap->crst_list);
		*table = (unsigned long) new | _REGION_ENTRY_LENGTH |
			(*table & _REGION_ENTRY_TYPE_MASK);
		page->index = gaddr;
		page = NULL;
	}
	spin_unlock(&gmap->guest_table_lock);
	if (page)
		__free_pages(page, CRST_ALLOC_ORDER);
	return 0;
}
/**
 * __gmap_segment_gaddr - find virtual address from segment pointer
 * @entry: pointer to a segment table entry in the guest address space
 *
 * Returns the virtual address in the guest address space for the segment
 */
static unsigned long __gmap_segment_gaddr(unsigned long *entry)
{
	struct page *page;
	unsigned long offset, mask;

	offset = (unsigned long) entry / sizeof(unsigned long);
	offset = (offset & (PTRS_PER_PMD - 1)) * PMD_SIZE;
	mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
	page = virt_to_page((void *)((unsigned long) entry & mask));
	return page->index + offset;
}
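
/*
 * Note: the calculation above relies on gmap_alloc_table() storing the
 * guest address covered by a crst page in page->index. The position of
 * the entry within that page is converted back to a guest address by
 * scaling the pmd index with PMD_SIZE.
 */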
/**
 * __gmap_unlink_by_vmaddr - unlink a single segment via a host address
 * @gmap: pointer to the guest address space structure
 * @vmaddr: address in the host process address space
 *
 * Returns 1 if a TLB flush is required
 */
static int __gmap_unlink_by_vmaddr(struct gmap *gmap, unsigned long vmaddr)
{
	unsigned long *entry;
	int flush = 0;

	BUG_ON(gmap_is_shadow(gmap));
	spin_lock(&gmap->guest_table_lock);
	entry = radix_tree_delete(&gmap->host_to_guest, vmaddr >> PMD_SHIFT);
	if (entry) {
		flush = (*entry != _SEGMENT_ENTRY_EMPTY);
		*entry = _SEGMENT_ENTRY_EMPTY;
	}
	spin_unlock(&gmap->guest_table_lock);
	return flush;
}
/**
 * __gmap_unmap_by_gaddr - unmap a single segment via a guest address
 * @gmap: pointer to the guest address space structure
 * @gaddr: address in the guest address space
 *
 * Returns 1 if a TLB flush is required
 */
static int __gmap_unmap_by_gaddr(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long vmaddr;

	vmaddr = (unsigned long) radix_tree_delete(&gmap->guest_to_host,
						   gaddr >> PMD_SHIFT);
	return vmaddr ? __gmap_unlink_by_vmaddr(gmap, vmaddr) : 0;
}
/**
 * gmap_unmap_segment - unmap segment from the guest address space
 * @gmap: pointer to the guest address space structure
 * @to: address in the guest address space
 * @len: length of the memory area to unmap
 *
 * Returns 0 if the unmap succeeded, -EINVAL if not.
 */
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
{
	unsigned long off;
	int flush;

	BUG_ON(gmap_is_shadow(gmap));
	if ((to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || to + len < to)
		return -EINVAL;

	flush = 0;
	down_write(&gmap->mm->mmap_sem);
	for (off = 0; off < len; off += PMD_SIZE)
		flush |= __gmap_unmap_by_gaddr(gmap, to + off);
	up_write(&gmap->mm->mmap_sem);
	if (flush)
		gmap_flush_tlb(gmap);
	return 0;
}
EXPORT_SYMBOL_GPL(gmap_unmap_segment);
/**
 * gmap_map_segment - map a segment to the guest address space
 * @gmap: pointer to the guest address space structure
 * @from: source address in the parent address space
 * @to: target address in the guest address space
 * @len: length of the memory area to map
 *
 * Returns 0 if the mmap succeeded, -EINVAL or -ENOMEM if not.
 */
int gmap_map_segment(struct gmap *gmap, unsigned long from,
		     unsigned long to, unsigned long len)
{
	unsigned long off;
	int flush;

	BUG_ON(gmap_is_shadow(gmap));
	if ((from | to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || from + len < from || to + len < to ||
	    from + len - 1 > TASK_SIZE_MAX || to + len - 1 > gmap->asce_end)
		return -EINVAL;

	flush = 0;
	down_write(&gmap->mm->mmap_sem);
	for (off = 0; off < len; off += PMD_SIZE) {
		/* Remove old translation */
		flush |= __gmap_unmap_by_gaddr(gmap, to + off);
		/* Store new translation */
		if (radix_tree_insert(&gmap->guest_to_host,
				      (to + off) >> PMD_SHIFT,
				      (void *) from + off))
			break;
	}
	up_write(&gmap->mm->mmap_sem);
	if (flush)
		gmap_flush_tlb(gmap);
	if (off >= len)
		return 0;
	gmap_unmap_segment(gmap, to, len);
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(gmap_map_segment);
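
/*
 * Example (illustrative): to make 1GB of the host mapping starting at
 * host address "uaddr" appear at guest address 0, a caller could use:
 *
 *	rc = gmap_map_segment(gmap, uaddr, 0, 1UL << 30);
 *
 * All three values must be segment (PMD_SIZE) aligned, otherwise
 * -EINVAL is returned.
 */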
/**
 * __gmap_translate - translate a guest address to a user space address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 *
 * Returns user space address which corresponds to the guest address or
 * -EFAULT if no such mapping exists.
 * This function does not establish potentially missing page table entries.
 * The mmap_sem of the mm that belongs to the address space must be held
 * when this function gets called.
 *
 * Note: Can also be called for shadow gmaps.
 */
unsigned long __gmap_translate(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long vmaddr;

	vmaddr = (unsigned long)
		radix_tree_lookup(&gmap->guest_to_host, gaddr >> PMD_SHIFT);
	/* Note: guest_to_host is empty for a shadow gmap */
	return vmaddr ? (vmaddr | (gaddr & ~PMD_MASK)) : -EFAULT;
}
EXPORT_SYMBOL_GPL(__gmap_translate);
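
/*
 * The translation is a single radix tree lookup: the guest address is
 * resolved at segment granularity, and the low bits (gaddr & ~PMD_MASK)
 * are the offset within the segment, identical in host and guest
 * address space.
 */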
/**
 * gmap_translate - translate a guest address to a user space address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 *
 * Returns user space address which corresponds to the guest address or
 * -EFAULT if no such mapping exists.
 * This function does not establish potentially missing page table entries.
 */
unsigned long gmap_translate(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long rc;

	down_read(&gmap->mm->mmap_sem);
	rc = __gmap_translate(gmap, gaddr);
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_translate);
/**
 * gmap_unlink - disconnect a page table from the gmap shadow tables
 * @mm: pointer to the parent mm_struct
 * @table: pointer to the host page table
 * @vmaddr: vm address associated with the host page table
 */
void gmap_unlink(struct mm_struct *mm, unsigned long *table,
		 unsigned long vmaddr)
{
	struct gmap *gmap;
	int flush;

	rcu_read_lock();
	list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
		flush = __gmap_unlink_by_vmaddr(gmap, vmaddr);
		if (flush)
			gmap_flush_tlb(gmap);
	}
	rcu_read_unlock();
}
/**
 * __gmap_link - set up shadow page tables to connect a host to a guest address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 * @vmaddr: vm address
 *
 * Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
 * if the vm address is already mapped to a different guest segment.
 * The mmap_sem of the mm that belongs to the address space must be held
 * when this function gets called.
 */
int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
{
	struct mm_struct *mm;
	unsigned long *table;
	spinlock_t *ptl;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	int rc;

	BUG_ON(gmap_is_shadow(gmap));
	/* Create higher level tables in the gmap page table */
	table = gmap->table;
	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION1) {
		table += (gaddr & _REGION1_INDEX) >> _REGION1_SHIFT;
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY,
				     gaddr & _REGION1_MASK))
			return -ENOMEM;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	}
	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION2) {
		table += (gaddr & _REGION2_INDEX) >> _REGION2_SHIFT;
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY,
				     gaddr & _REGION2_MASK))
			return -ENOMEM;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	}
	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION3) {
		table += (gaddr & _REGION3_INDEX) >> _REGION3_SHIFT;
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY,
				     gaddr & _REGION3_MASK))
			return -ENOMEM;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	}
	table += (gaddr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
	/* Walk the parent mm page table */
	mm = gmap->mm;
	pgd = pgd_offset(mm, vmaddr);
	VM_BUG_ON(pgd_none(*pgd));
	p4d = p4d_offset(pgd, vmaddr);
	VM_BUG_ON(p4d_none(*p4d));
	pud = pud_offset(p4d, vmaddr);
	VM_BUG_ON(pud_none(*pud));
	/* large puds cannot yet be handled */
	if (pud_large(*pud))
		return -EFAULT;
	pmd = pmd_offset(pud, vmaddr);
	VM_BUG_ON(pmd_none(*pmd));
	/* large pmds cannot yet be handled */
	if (pmd_large(*pmd))
		return -EFAULT;
	/* Link gmap segment table entry location to page table. */
	rc = radix_tree_preload(GFP_KERNEL);
	if (rc)
		return rc;
	ptl = pmd_lock(mm, pmd);
	spin_lock(&gmap->guest_table_lock);
	if (*table == _SEGMENT_ENTRY_EMPTY) {
		rc = radix_tree_insert(&gmap->host_to_guest,
				       vmaddr >> PMD_SHIFT, table);
		if (!rc)
			*table = pmd_val(*pmd);
	} else
		rc = 0;
	spin_unlock(&gmap->guest_table_lock);
	spin_unlock(ptl);
	radix_tree_preload_end();
	return rc;
}
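
/*
 * Note on locking: the pmd lock of the parent mm is taken before
 * gmap->guest_table_lock, and the host_to_guest radix tree is only
 * changed under both locks, with the radix tree node preallocated.
 * This keeps the segment table entry and the radix tree in sync.
 */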
/**
 * gmap_fault - resolve a fault on a guest address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 * @fault_flags: flags to pass down to handle_mm_fault()
 *
 * Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
 * if the vm address is already mapped to a different guest segment.
 */
int gmap_fault(struct gmap *gmap, unsigned long gaddr,
	       unsigned int fault_flags)
{
	unsigned long vmaddr;
	int rc;
	bool unlocked;

	down_read(&gmap->mm->mmap_sem);

retry:
	unlocked = false;
	vmaddr = __gmap_translate(gmap, gaddr);
	if (IS_ERR_VALUE(vmaddr)) {
		rc = vmaddr;
		goto out_up;
	}
	if (fixup_user_fault(current, gmap->mm, vmaddr, fault_flags,
			     &unlocked)) {
		rc = -EFAULT;
		goto out_up;
	}
	/*
	 * In the case that fixup_user_fault unlocked the mmap_sem during
	 * the fault-in, redo __gmap_translate to avoid racing with a
	 * map/unmap_segment.
	 */
	if (unlocked)
		goto retry;

	rc = __gmap_link(gmap, gaddr, vmaddr);
out_up:
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_fault);
/*
 * this function is assumed to be called with mmap_sem held
 */
void __gmap_zap(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long vmaddr;
	spinlock_t *ptl;
	pte_t *ptep;

	/* Find the vm address for the guest address */
	vmaddr = (unsigned long) radix_tree_lookup(&gmap->guest_to_host,
						   gaddr >> PMD_SHIFT);
	if (vmaddr) {
		vmaddr |= gaddr & ~PMD_MASK;
		/* Get pointer to the page table entry */
		ptep = get_locked_pte(gmap->mm, vmaddr, &ptl);
		if (likely(ptep))
			ptep_zap_unused(gmap->mm, vmaddr, ptep, 0);
		pte_unmap_unlock(ptep, ptl);
	}
}
EXPORT_SYMBOL_GPL(__gmap_zap);
void gmap_discard(struct gmap *gmap, unsigned long from, unsigned long to)
{
	unsigned long gaddr, vmaddr, size;
	struct vm_area_struct *vma;

	down_read(&gmap->mm->mmap_sem);
	for (gaddr = from; gaddr < to;
	     gaddr = (gaddr + PMD_SIZE) & PMD_MASK) {
		/* Find the vm address for the guest address */
		vmaddr = (unsigned long)
			radix_tree_lookup(&gmap->guest_to_host,
					  gaddr >> PMD_SHIFT);
		if (!vmaddr)
			continue;
		vmaddr |= gaddr & ~PMD_MASK;
		/* Find vma in the parent mm */
		vma = find_vma(gmap->mm, vmaddr);
		if (!vma)
			continue;
		size = min(to - gaddr, PMD_SIZE - (gaddr & ~PMD_MASK));
		zap_page_range(vma, vmaddr, size);
	}
	up_read(&gmap->mm->mmap_sem);
}
EXPORT_SYMBOL_GPL(gmap_discard);
static LIST_HEAD(gmap_notifier_list);
static DEFINE_SPINLOCK(gmap_notifier_lock);

/**
 * gmap_register_pte_notifier - register a pte invalidation callback
 * @nb: pointer to the gmap notifier block
 */
void gmap_register_pte_notifier(struct gmap_notifier *nb)
{
	spin_lock(&gmap_notifier_lock);
	list_add_rcu(&nb->list, &gmap_notifier_list);
	spin_unlock(&gmap_notifier_lock);
}
EXPORT_SYMBOL_GPL(gmap_register_pte_notifier);

/**
 * gmap_unregister_pte_notifier - remove a pte invalidation callback
 * @nb: pointer to the gmap notifier block
 */
void gmap_unregister_pte_notifier(struct gmap_notifier *nb)
{
	spin_lock(&gmap_notifier_lock);
	list_del_rcu(&nb->list);
	spin_unlock(&gmap_notifier_lock);
	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(gmap_unregister_pte_notifier);

/**
 * gmap_call_notifier - call all registered invalidation callbacks
 * @gmap: pointer to guest mapping meta data structure
 * @start: start virtual address in the guest address space
 * @end: end virtual address in the guest address space
 */
static void gmap_call_notifier(struct gmap *gmap, unsigned long start,
			       unsigned long end)
{
	struct gmap_notifier *nb;

	list_for_each_entry(nb, &gmap_notifier_list, list)
		nb->notifier_call(gmap, start, end);
}
/**
 * gmap_table_walk - walk the gmap page tables
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @level: page table level to stop at
 *
 * Returns a table entry pointer for the given guest address and @level
 * @level=0 : returns a pointer to a page table entry (or NULL)
 * @level=1 : returns a pointer to a segment table entry (or NULL)
 * @level=2 : returns a pointer to a region-3 table entry (or NULL)
 * @level=3 : returns a pointer to a region-2 table entry (or NULL)
 * @level=4 : returns a pointer to a region-1 table entry (or NULL)
 *
 * Returns NULL if the gmap page tables could not be walked to the
 * requested level.
 *
 * Note: Can also be called for shadow gmaps.
 */
static inline unsigned long *gmap_table_walk(struct gmap *gmap,
					     unsigned long gaddr, int level)
{
	unsigned long *table;

	if ((gmap->asce & _ASCE_TYPE_MASK) + 4 < (level * 4))
		return NULL;
	if (gmap_is_shadow(gmap) && gmap->removed)
		return NULL;
	if (gaddr & (-1UL << (31 + ((gmap->asce & _ASCE_TYPE_MASK) >> 2) * 11)))
		return NULL;
	table = gmap->table;
	switch (gmap->asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_REGION1:
		table += (gaddr & _REGION1_INDEX) >> _REGION1_SHIFT;
		if (level == 4)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* Fallthrough */
	case _ASCE_TYPE_REGION2:
		table += (gaddr & _REGION2_INDEX) >> _REGION2_SHIFT;
		if (level == 3)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* Fallthrough */
	case _ASCE_TYPE_REGION3:
		table += (gaddr & _REGION3_INDEX) >> _REGION3_SHIFT;
		if (level == 2)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* Fallthrough */
	case _ASCE_TYPE_SEGMENT:
		table += (gaddr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
		if (level == 1)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
		table += (gaddr & _PAGE_INDEX) >> _PAGE_SHIFT;
	}
	return table;
}
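
/*
 * Example: for a gmap with a region-2 ASCE, the walk starts at the
 * _ASCE_TYPE_REGION2 case; level=3 returns a pointer into the top level
 * (region-2) table itself, while level=1 stops at the segment table
 * entry after two table dereferences. Levels above the ASCE type fail
 * the first check and return NULL.
 */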
/**
 * gmap_pte_op_walk - walk the gmap page table, get the page table lock
 *		      and return the pte pointer
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @ptl: pointer to the spinlock pointer
 *
 * Returns a pointer to the locked pte for a guest address, or NULL
 *
 * Note: Can also be called for shadow gmaps.
 */
static pte_t *gmap_pte_op_walk(struct gmap *gmap, unsigned long gaddr,
			       spinlock_t **ptl)
{
	unsigned long *table;

	if (gmap_is_shadow(gmap))
		spin_lock(&gmap->guest_table_lock);
	/* Walk the gmap page table, lock and get pte pointer */
	table = gmap_table_walk(gmap, gaddr, 1); /* get segment pointer */
	if (!table || *table & _SEGMENT_ENTRY_INVALID) {
		if (gmap_is_shadow(gmap))
			spin_unlock(&gmap->guest_table_lock);
		return NULL;
	}
	if (gmap_is_shadow(gmap)) {
		*ptl = &gmap->guest_table_lock;
		return pte_offset_map((pmd_t *) table, gaddr);
	}
	return pte_alloc_map_lock(gmap->mm, (pmd_t *) table, gaddr, ptl);
}
/**
 * gmap_pte_op_fixup - force a page in and connect the gmap page table
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @vmaddr: address in the host process address space
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 *
 * Returns 0 if the caller can retry __gmap_translate (might fail again),
 * -ENOMEM if out of memory and -EFAULT if anything goes wrong while fixing
 * up or connecting the gmap page table.
 */
static int gmap_pte_op_fixup(struct gmap *gmap, unsigned long gaddr,
			     unsigned long vmaddr, int prot)
{
	struct mm_struct *mm = gmap->mm;
	unsigned int fault_flags;
	bool unlocked = false;

	BUG_ON(gmap_is_shadow(gmap));
	fault_flags = (prot == PROT_WRITE) ? FAULT_FLAG_WRITE : 0;
	if (fixup_user_fault(current, mm, vmaddr, fault_flags, &unlocked))
		return -EFAULT;
	if (unlocked)
		/* lost mmap_sem, caller has to retry __gmap_translate */
		return 0;
	/* Connect the page tables */
	return __gmap_link(gmap, gaddr, vmaddr);
}
/**
 * gmap_pte_op_end - release the page table lock
 * @ptl: pointer to the spinlock pointer
 */
static void gmap_pte_op_end(spinlock_t *ptl)
{
	if (ptl)
		spin_unlock(ptl);
}
/**
 * gmap_protect_range - remove access rights to memory and set pgste bits
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @len: size of area
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 * @bits: pgste notification bits to set
 *
 * Returns 0 if successfully protected, -ENOMEM if out of memory and
 * -EFAULT if gaddr is invalid (or mapping for shadows is missing).
 *
 * Called with sg->mm->mmap_sem in read.
 *
 * Note: Can also be called for shadow gmaps.
 */
static int gmap_protect_range(struct gmap *gmap, unsigned long gaddr,
			      unsigned long len, int prot, unsigned long bits)
{
	unsigned long vmaddr;
	spinlock_t *ptl;
	pte_t *ptep;
	int rc;

	while (len) {
		rc = -EAGAIN;
		ptep = gmap_pte_op_walk(gmap, gaddr, &ptl);
		if (ptep) {
			rc = ptep_force_prot(gmap->mm, gaddr, ptep, prot, bits);
			gmap_pte_op_end(ptl);
		}
		if (rc) {
			vmaddr = __gmap_translate(gmap, gaddr);
			if (IS_ERR_VALUE(vmaddr))
				return vmaddr;
			rc = gmap_pte_op_fixup(gmap, gaddr, vmaddr, prot);
			if (rc)
				return rc;
			continue;
		}
		gaddr += PAGE_SIZE;
		len -= PAGE_SIZE;
	}
	return 0;
}
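
/*
 * The loop above implements the common fixup pattern of this file: walk
 * and update the pte under its lock; if that fails, translate the guest
 * address, fault the page in via gmap_pte_op_fixup() and retry the same
 * page until the whole range is protected.
 */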
/**
 * gmap_mprotect_notify - change access rights for a range of ptes and
 *			  call the notifier if any pte changes again
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @len: size of area
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 *
 * Returns 0 if for each page in the given range a gmap mapping exists,
 * the new access rights could be set and the notifier could be armed.
 * If the gmap mapping is missing for one or more pages -EFAULT is
 * returned. If no memory could be allocated -ENOMEM is returned.
 * This function establishes missing page table entries.
 */
int gmap_mprotect_notify(struct gmap *gmap, unsigned long gaddr,
			 unsigned long len, int prot)
{
	int rc;

	if ((gaddr & ~PAGE_MASK) || (len & ~PAGE_MASK) || gmap_is_shadow(gmap))
		return -EINVAL;
	if (!MACHINE_HAS_ESOP && prot == PROT_READ)
		return -EINVAL;
	down_read(&gmap->mm->mmap_sem);
	rc = gmap_protect_range(gmap, gaddr, len, prot, PGSTE_IN_BIT);
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_mprotect_notify);
/**
 * gmap_read_table - get an unsigned long value from a guest page table using
 *		     absolute addressing, without marking the page referenced.
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @val: pointer to the unsigned long value to return
 *
 * Returns 0 if the value was read, -ENOMEM if out of memory and -EFAULT
 * if reading using the virtual address failed.
 *
 * Called with gmap->mm->mmap_sem in read.
 */
int gmap_read_table(struct gmap *gmap, unsigned long gaddr, unsigned long *val)
{
	unsigned long address, vmaddr;
	spinlock_t *ptl;
	pte_t *ptep, pte;
	int rc;

	while (1) {
		rc = -EAGAIN;
		ptep = gmap_pte_op_walk(gmap, gaddr, &ptl);
		if (ptep) {
			pte = *ptep;
			if (pte_present(pte) && (pte_val(pte) & _PAGE_READ)) {
				address = pte_val(pte) & PAGE_MASK;
				address += gaddr & ~PAGE_MASK;
				*val = *(unsigned long *) address;
				pte_val(*ptep) |= _PAGE_YOUNG;
				/* Do *NOT* clear the _PAGE_INVALID bit! */
				rc = 0;
			}
			gmap_pte_op_end(ptl);
		}
		if (!rc)
			break;
		vmaddr = __gmap_translate(gmap, gaddr);
		if (IS_ERR_VALUE(vmaddr)) {
			rc = vmaddr;
			break;
		}
		rc = gmap_pte_op_fixup(gmap, gaddr, vmaddr, PROT_READ);
		if (rc)
			break;
	}
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_read_table);
/**
 * gmap_insert_rmap - add a rmap to the host_to_rmap radix tree
 * @sg: pointer to the shadow guest address space structure
 * @vmaddr: vm address associated with the rmap
 * @rmap: pointer to the rmap structure
 *
 * Called with the sg->guest_table_lock
 */
static inline void gmap_insert_rmap(struct gmap *sg, unsigned long vmaddr,
				    struct gmap_rmap *rmap)
{
	void __rcu **slot;

	BUG_ON(!gmap_is_shadow(sg));
	slot = radix_tree_lookup_slot(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT);
	if (slot) {
		rmap->next = radix_tree_deref_slot_protected(slot,
							&sg->guest_table_lock);
		radix_tree_replace_slot(&sg->host_to_rmap, slot, rmap);
	} else {
		rmap->next = NULL;
		radix_tree_insert(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT,
				  rmap);
	}
}
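
/*
 * Several rmaps may exist for the same host page, e.g. when multiple
 * shadow table entries are backed by the same parent page; they are
 * kept as a singly linked list headed in the host_to_rmap radix tree,
 * with new entries chained in front via rmap->next.
 */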
/**
 * gmap_protect_rmap - modify access rights to memory and create an rmap
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow gmap
 * @paddr: address in the parent guest address space
 * @len: length of the memory area to protect
 * @prot: indicates access rights: none, read-only or read-write
 *
 * Returns 0 if successfully protected and the rmap was created, -ENOMEM
 * if out of memory and -EFAULT if paddr is invalid.
 */
static int gmap_protect_rmap(struct gmap *sg, unsigned long raddr,
			     unsigned long paddr, unsigned long len, int prot)
{
	struct gmap *parent;
	struct gmap_rmap *rmap;
	unsigned long vmaddr;
	spinlock_t *ptl;
	pte_t *ptep;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	parent = sg->parent;
	while (len) {
		vmaddr = __gmap_translate(parent, paddr);
		if (IS_ERR_VALUE(vmaddr))
			return vmaddr;
		rmap = kzalloc(sizeof(*rmap), GFP_KERNEL);
		if (!rmap)
			return -ENOMEM;
		rmap->raddr = raddr;
		rc = radix_tree_preload(GFP_KERNEL);
		if (rc) {
			kfree(rmap);
			return rc;
		}
		rc = -EAGAIN;
		ptep = gmap_pte_op_walk(parent, paddr, &ptl);
		if (ptep) {
			spin_lock(&sg->guest_table_lock);
			rc = ptep_force_prot(parent->mm, paddr, ptep, prot,
					     PGSTE_VSIE_BIT);
			if (!rc)
				gmap_insert_rmap(sg, vmaddr, rmap);
			spin_unlock(&sg->guest_table_lock);
			gmap_pte_op_end(ptl);
		}
		radix_tree_preload_end();
		if (rc) {
			kfree(rmap);
			rc = gmap_pte_op_fixup(parent, paddr, vmaddr, prot);
			if (rc)
				return rc;
			continue;
		}
		paddr += PAGE_SIZE;
		len -= PAGE_SIZE;
	}
	return 0;
}
#define _SHADOW_RMAP_MASK	0x7
#define _SHADOW_RMAP_REGION1	0x5
#define _SHADOW_RMAP_REGION2	0x4
#define _SHADOW_RMAP_REGION3	0x3
#define _SHADOW_RMAP_SEGMENT	0x2
#define _SHADOW_RMAP_PGTABLE	0x1
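
/*
 * The rmap addresses stored with these tags are region/segment/page
 * aligned, so the three low bits are free to carry one of the
 * _SHADOW_RMAP_* values; gmap_shadow_notify() strips the tag with
 * "raddr ^ bits" and uses it to select the matching gmap_unshadow_*()
 * function.
 */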
/**
 * gmap_idte_one - invalidate a single region or segment table entry
 * @asce: region or segment table *origin* + table-type bits
 * @vaddr: virtual address to identify the table entry to flush
 *
 * The invalid bit of a single region or segment table entry is set
 * and the associated TLB entries depending on the entry are flushed.
 * The table-type of the @asce identifies the portion of the @vaddr
 * that is used as the invalidation index.
 */
static inline void gmap_idte_one(unsigned long asce, unsigned long vaddr)
{
	asm volatile(
		"	.insn	rrf,0xb98e0000,%0,%1,0,0"
		: : "a" (asce), "a" (vaddr) : "cc", "memory");
}
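
/*
 * The .insn above encodes IDTE (invalidate DAT table entry, opcode
 * 0xb98e), which invalidates the selected table entry and purges the
 * TLB of translations created through it.
 */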
/**
 * gmap_unshadow_page - remove a page from a shadow page table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_page(struct gmap *sg, unsigned long raddr)
{
	unsigned long *table;

	BUG_ON(!gmap_is_shadow(sg));
	table = gmap_table_walk(sg, raddr, 0); /* get page table pointer */
	if (!table || *table & _PAGE_INVALID)
		return;
	gmap_call_notifier(sg, raddr, raddr + _PAGE_SIZE - 1);
	ptep_unshadow_pte(sg->mm, raddr, (pte_t *) table);
}
/**
 * __gmap_unshadow_pgt - remove all entries from a shadow page table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 * @pgt: pointer to the start of a shadow page table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_pgt(struct gmap *sg, unsigned long raddr,
				unsigned long *pgt)
{
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	for (i = 0; i < _PAGE_ENTRIES; i++, raddr += _PAGE_SIZE)
		pgt[i] = _PAGE_INVALID;
}
/**
 * gmap_unshadow_pgt - remove a shadow page table from a segment entry
 * @sg: pointer to the shadow guest address space structure
 * @raddr: address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_pgt(struct gmap *sg, unsigned long raddr)
{
	unsigned long sto, *ste, *pgt;
	struct page *page;

	BUG_ON(!gmap_is_shadow(sg));
	ste = gmap_table_walk(sg, raddr, 1); /* get segment pointer */
	if (!ste || !(*ste & _SEGMENT_ENTRY_ORIGIN))
		return;
	gmap_call_notifier(sg, raddr, raddr + _SEGMENT_SIZE - 1);
	sto = (unsigned long) (ste - ((raddr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT));
	gmap_idte_one(sto | _ASCE_TYPE_SEGMENT, raddr);
	pgt = (unsigned long *)(*ste & _SEGMENT_ENTRY_ORIGIN);
	*ste = _SEGMENT_ENTRY_EMPTY;
	__gmap_unshadow_pgt(sg, raddr, pgt);
	/* Free page table */
	page = pfn_to_page(__pa(pgt) >> PAGE_SHIFT);
	list_del(&page->lru);
	page_table_free_pgste(page);
}
/**
 * __gmap_unshadow_sgt - remove all entries from a shadow segment table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 * @sgt: pointer to the start of a shadow segment table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr,
				unsigned long *sgt)
{
	unsigned long asce, *pgt;
	struct page *page;
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	asce = (unsigned long) sgt | _ASCE_TYPE_SEGMENT;
	for (i = 0; i < _CRST_ENTRIES; i++, raddr += _SEGMENT_SIZE) {
		if (!(sgt[i] & _SEGMENT_ENTRY_ORIGIN))
			continue;
		pgt = (unsigned long *)(sgt[i] & _REGION_ENTRY_ORIGIN);
		sgt[i] = _SEGMENT_ENTRY_EMPTY;
		__gmap_unshadow_pgt(sg, raddr, pgt);
		/* Free page table */
		page = pfn_to_page(__pa(pgt) >> PAGE_SHIFT);
		list_del(&page->lru);
		page_table_free_pgste(page);
	}
}
/**
 * gmap_unshadow_sgt - remove a shadow segment table from a region-3 entry
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 *
 * Called with the shadow->guest_table_lock
 */
static void gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr)
{
	unsigned long r3o, *r3e, *sgt;
	struct page *page;

	BUG_ON(!gmap_is_shadow(sg));
	r3e = gmap_table_walk(sg, raddr, 2); /* get region-3 pointer */
	if (!r3e || !(*r3e & _REGION_ENTRY_ORIGIN))
		return;
	gmap_call_notifier(sg, raddr, raddr + _REGION3_SIZE - 1);
	r3o = (unsigned long) (r3e - ((raddr & _REGION3_INDEX) >> _REGION3_SHIFT));
	gmap_idte_one(r3o | _ASCE_TYPE_REGION3, raddr);
	sgt = (unsigned long *)(*r3e & _REGION_ENTRY_ORIGIN);
	*r3e = _REGION3_ENTRY_EMPTY;
	__gmap_unshadow_sgt(sg, raddr, sgt);
	/* Free segment table */
	page = pfn_to_page(__pa(sgt) >> PAGE_SHIFT);
	list_del(&page->lru);
	__free_pages(page, CRST_ALLOC_ORDER);
}
/**
 * __gmap_unshadow_r3t - remove all entries from a shadow region-3 table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: address in the shadow guest address space
 * @r3t: pointer to the start of a shadow region-3 table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr,
				unsigned long *r3t)
{
	unsigned long asce, *sgt;
	struct page *page;
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	asce = (unsigned long) r3t | _ASCE_TYPE_REGION3;
	for (i = 0; i < _CRST_ENTRIES; i++, raddr += _REGION3_SIZE) {
		if (!(r3t[i] & _REGION_ENTRY_ORIGIN))
			continue;
		sgt = (unsigned long *)(r3t[i] & _REGION_ENTRY_ORIGIN);
		r3t[i] = _REGION3_ENTRY_EMPTY;
		__gmap_unshadow_sgt(sg, raddr, sgt);
		/* Free segment table */
		page = pfn_to_page(__pa(sgt) >> PAGE_SHIFT);
		list_del(&page->lru);
		__free_pages(page, CRST_ALLOC_ORDER);
	}
}
/**
 * gmap_unshadow_r3t - remove a shadow region-3 table from a region-2 entry
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr)
{
	unsigned long r2o, *r2e, *r3t;
	struct page *page;

	BUG_ON(!gmap_is_shadow(sg));
	r2e = gmap_table_walk(sg, raddr, 3); /* get region-2 pointer */
	if (!r2e || !(*r2e & _REGION_ENTRY_ORIGIN))
		return;
	gmap_call_notifier(sg, raddr, raddr + _REGION2_SIZE - 1);
	r2o = (unsigned long) (r2e - ((raddr & _REGION2_INDEX) >> _REGION2_SHIFT));
	gmap_idte_one(r2o | _ASCE_TYPE_REGION2, raddr);
	r3t = (unsigned long *)(*r2e & _REGION_ENTRY_ORIGIN);
	*r2e = _REGION2_ENTRY_EMPTY;
	__gmap_unshadow_r3t(sg, raddr, r3t);
	/* Free region 3 table */
	page = pfn_to_page(__pa(r3t) >> PAGE_SHIFT);
	list_del(&page->lru);
	__free_pages(page, CRST_ALLOC_ORDER);
}
/**
 * __gmap_unshadow_r2t - remove all entries from a shadow region-2 table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 * @r2t: pointer to the start of a shadow region-2 table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr,
				unsigned long *r2t)
{
	unsigned long asce, *r3t;
	struct page *page;
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	asce = (unsigned long) r2t | _ASCE_TYPE_REGION2;
	for (i = 0; i < _CRST_ENTRIES; i++, raddr += _REGION2_SIZE) {
		if (!(r2t[i] & _REGION_ENTRY_ORIGIN))
			continue;
		r3t = (unsigned long *)(r2t[i] & _REGION_ENTRY_ORIGIN);
		r2t[i] = _REGION2_ENTRY_EMPTY;
		__gmap_unshadow_r3t(sg, raddr, r3t);
		/* Free region 3 table */
		page = pfn_to_page(__pa(r3t) >> PAGE_SHIFT);
		list_del(&page->lru);
		__free_pages(page, CRST_ALLOC_ORDER);
	}
}
/**
 * gmap_unshadow_r2t - remove a shadow region-2 table from a region-1 entry
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr)
{
	unsigned long r1o, *r1e, *r2t;
	struct page *page;

	BUG_ON(!gmap_is_shadow(sg));
	r1e = gmap_table_walk(sg, raddr, 4); /* get region-1 pointer */
	if (!r1e || !(*r1e & _REGION_ENTRY_ORIGIN))
		return;
	gmap_call_notifier(sg, raddr, raddr + _REGION1_SIZE - 1);
	r1o = (unsigned long) (r1e - ((raddr & _REGION1_INDEX) >> _REGION1_SHIFT));
	gmap_idte_one(r1o | _ASCE_TYPE_REGION1, raddr);
	r2t = (unsigned long *)(*r1e & _REGION_ENTRY_ORIGIN);
	*r1e = _REGION1_ENTRY_EMPTY;
	__gmap_unshadow_r2t(sg, raddr, r2t);
	/* Free region 2 table */
	page = pfn_to_page(__pa(r2t) >> PAGE_SHIFT);
	list_del(&page->lru);
	__free_pages(page, CRST_ALLOC_ORDER);
}
/**
 * __gmap_unshadow_r1t - remove all entries from a shadow region-1 table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 * @r1t: pointer to the start of a shadow region-1 table
 *
 * Called with the shadow->guest_table_lock
 */
static void __gmap_unshadow_r1t(struct gmap *sg, unsigned long raddr,
				unsigned long *r1t)
{
	unsigned long asce, *r2t;
	struct page *page;
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	asce = (unsigned long) r1t | _ASCE_TYPE_REGION1;
	for (i = 0; i < _CRST_ENTRIES; i++, raddr += _REGION1_SIZE) {
		if (!(r1t[i] & _REGION_ENTRY_ORIGIN))
			continue;
		r2t = (unsigned long *)(r1t[i] & _REGION_ENTRY_ORIGIN);
		__gmap_unshadow_r2t(sg, raddr, r2t);
		/* Clear entry and flush translation r1t -> r2t */
		gmap_idte_one(asce, raddr);
		r1t[i] = _REGION1_ENTRY_EMPTY;
		/* Free region 2 table */
		page = pfn_to_page(__pa(r2t) >> PAGE_SHIFT);
		list_del(&page->lru);
		__free_pages(page, CRST_ALLOC_ORDER);
	}
}
/**
 * gmap_unshadow - remove a shadow page table completely
 * @sg: pointer to the shadow guest address space structure
 *
 * Called with sg->guest_table_lock
 */
static void gmap_unshadow(struct gmap *sg)
{
	unsigned long *table;

	BUG_ON(!gmap_is_shadow(sg));
	if (sg->removed)
		return;
	sg->removed = 1;
	gmap_call_notifier(sg, 0, -1UL);
	gmap_flush_tlb(sg);
	table = (unsigned long *)(sg->asce & _ASCE_ORIGIN);
	switch (sg->asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_REGION1:
		__gmap_unshadow_r1t(sg, 0, table);
		break;
	case _ASCE_TYPE_REGION2:
		__gmap_unshadow_r2t(sg, 0, table);
		break;
	case _ASCE_TYPE_REGION3:
		__gmap_unshadow_r3t(sg, 0, table);
		break;
	case _ASCE_TYPE_SEGMENT:
		__gmap_unshadow_sgt(sg, 0, table);
		break;
	}
}
/**
 * gmap_find_shadow - find a specific asce in the list of shadow tables
 * @parent: pointer to the parent gmap
 * @asce: ASCE for which the shadow table is created
 * @edat_level: edat level to be used for the shadow translation
 *
 * Returns the pointer to a gmap if a shadow table with the given asce is
 * already available, ERR_PTR(-EAGAIN) if another one is just being created,
 * otherwise NULL
 */
static struct gmap *gmap_find_shadow(struct gmap *parent, unsigned long asce,
				     int edat_level)
{
	struct gmap *sg;

	list_for_each_entry(sg, &parent->children, list) {
		if (sg->orig_asce != asce || sg->edat_level != edat_level ||
		    sg->removed)
			continue;
		if (!sg->initialized)
			return ERR_PTR(-EAGAIN);
		atomic_inc(&sg->ref_count);
		return sg;
	}
	return NULL;
}
/**
 * gmap_shadow_valid - check if a shadow guest address space matches the
 *		       given properties and is still valid
 * @sg: pointer to the shadow guest address space structure
 * @asce: ASCE for which the shadow table is requested
 * @edat_level: edat level to be used for the shadow translation
 *
 * Returns 1 if the gmap shadow is still valid and matches the given
 * properties, the caller can continue using it. Returns 0 otherwise, the
 * caller has to request a new shadow gmap in this case.
 */
int gmap_shadow_valid(struct gmap *sg, unsigned long asce, int edat_level)
{
	if (sg->removed)
		return 0;
	return sg->orig_asce == asce && sg->edat_level == edat_level;
}
EXPORT_SYMBOL_GPL(gmap_shadow_valid);
/**
 * gmap_shadow - create/find a shadow guest address space
 * @parent: pointer to the parent gmap
 * @asce: ASCE for which the shadow table is created
 * @edat_level: edat level to be used for the shadow translation
 *
 * The pages of the top level page table referred by the asce parameter
 * will be set to read-only and marked in the PGSTEs of the kvm process.
 * The shadow table will be removed automatically on any change to the
 * PTE mapping for the source table.
 *
 * Returns a guest address space structure, ERR_PTR(-ENOMEM) if out of memory,
 * ERR_PTR(-EAGAIN) if the caller has to retry and ERR_PTR(-EFAULT) if the
 * parent gmap table could not be protected.
 */
struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce,
			 int edat_level)
{
	struct gmap *sg, *new;
	unsigned long limit;
	int rc;

	BUG_ON(gmap_is_shadow(parent));
	spin_lock(&parent->shadow_lock);
	sg = gmap_find_shadow(parent, asce, edat_level);
	spin_unlock(&parent->shadow_lock);
	if (sg)
		return sg;
	/* Create a new shadow gmap */
	limit = -1UL >> (33 - (((asce & _ASCE_TYPE_MASK) >> 2) * 11));
	if (asce & _ASCE_REAL_SPACE)
		limit = -1UL;
	new = gmap_alloc(limit);
	if (!new)
		return ERR_PTR(-ENOMEM);
	new->mm = parent->mm;
	new->parent = gmap_get(parent);
	new->orig_asce = asce;
	new->edat_level = edat_level;
	new->initialized = false;
	spin_lock(&parent->shadow_lock);
	/* Recheck if another CPU created the same shadow */
	sg = gmap_find_shadow(parent, asce, edat_level);
	if (sg) {
		spin_unlock(&parent->shadow_lock);
		gmap_free(new);
		return sg;
	}
	if (asce & _ASCE_REAL_SPACE) {
		/* only allow one real-space gmap shadow */
		list_for_each_entry(sg, &parent->children, list) {
			if (sg->orig_asce & _ASCE_REAL_SPACE) {
				spin_lock(&sg->guest_table_lock);
				gmap_unshadow(sg);
				spin_unlock(&sg->guest_table_lock);
				list_del(&sg->list);
				gmap_put(sg);
				break;
			}
		}
	}
	atomic_set(&new->ref_count, 2);
	list_add(&new->list, &parent->children);
	if (asce & _ASCE_REAL_SPACE) {
		/* nothing to protect, return right away */
		new->initialized = true;
		spin_unlock(&parent->shadow_lock);
		return new;
	}
	spin_unlock(&parent->shadow_lock);
	/* protect after insertion, so it will get properly invalidated */
	down_read(&parent->mm->mmap_sem);
	rc = gmap_protect_range(parent, asce & _ASCE_ORIGIN,
				((asce & _ASCE_TABLE_LENGTH) + 1) * PAGE_SIZE,
				PROT_READ, PGSTE_VSIE_BIT);
	up_read(&parent->mm->mmap_sem);
	spin_lock(&parent->shadow_lock);
	new->initialized = true;
	if (rc) {
		list_del(&new->list);
		gmap_free(new);
		new = ERR_PTR(rc);
	}
	spin_unlock(&parent->shadow_lock);
	return new;
}
EXPORT_SYMBOL_GPL(gmap_shadow);
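
/*
 * gmap_shadow() uses the classic alloc-outside-lock pattern: the shadow
 * list is checked, the new gmap is allocated without the shadow_lock,
 * and the list is rechecked under the lock before insertion, so a
 * racing CPU creating the same shadow wins and the local allocation is
 * freed again.
 */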
/**
 * gmap_shadow_r2t - create an empty shadow region 2 table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @r2t: parent gmap address of the region 2 table to get shadowed
 * @fake: r2t references contiguous guest memory block, not a r2t
 *
 * The r2t parameter specifies the address of the source table. The
 * four pages of the source table are made read-only in the parent gmap
 * address space. A write to the source table area @r2t will automatically
 * remove the shadow r2 table and all of its descendants.
 *
 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t,
		    int fake)
{
	unsigned long raddr, origin, offset, len;
	unsigned long *s_r2t, *table;
	struct page *page;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	/* Allocate a shadow region second table */
	page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
	if (!page)
		return -ENOMEM;
	page->index = r2t & _REGION_ENTRY_ORIGIN;
	if (fake)
		page->index |= GMAP_SHADOW_FAKE_TABLE;
	s_r2t = (unsigned long *) page_to_phys(page);
	/* Install shadow region second table */
	spin_lock(&sg->guest_table_lock);
	table = gmap_table_walk(sg, saddr, 4); /* get region-1 pointer */
	if (!table) {
		rc = -EAGAIN;		/* Race with unshadow */
		goto out_free;
	}
	if (!(*table & _REGION_ENTRY_INVALID)) {
		rc = 0;			/* Already established */
		goto out_free;
	} else if (*table & _REGION_ENTRY_ORIGIN) {
		rc = -EAGAIN;		/* Race with shadow */
		goto out_free;
	}
	crst_table_init(s_r2t, _REGION2_ENTRY_EMPTY);
	/* mark as invalid as long as the parent table is not protected */
	*table = (unsigned long) s_r2t | _REGION_ENTRY_LENGTH |
		 _REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID;
	if (sg->edat_level >= 1)
		*table |= (r2t & _REGION_ENTRY_PROTECT);
	list_add(&page->lru, &sg->crst_list);
	if (fake) {
		/* nothing to protect for fake tables */
		*table &= ~_REGION_ENTRY_INVALID;
		spin_unlock(&sg->guest_table_lock);
		return 0;
	}
	spin_unlock(&sg->guest_table_lock);
	/* Make r2t read-only in parent gmap page table */
	raddr = (saddr & _REGION1_MASK) | _SHADOW_RMAP_REGION1;
	origin = r2t & _REGION_ENTRY_ORIGIN;
	offset = ((r2t & _REGION_ENTRY_OFFSET) >> 6) * PAGE_SIZE;
	len = ((r2t & _REGION_ENTRY_LENGTH) + 1) * PAGE_SIZE - offset;
	rc = gmap_protect_rmap(sg, raddr, origin + offset, len, PROT_READ);
	spin_lock(&sg->guest_table_lock);
	if (!rc) {
		table = gmap_table_walk(sg, saddr, 4);
		if (!table || (*table & _REGION_ENTRY_ORIGIN) !=
		    (unsigned long) s_r2t)
			rc = -EAGAIN;		/* Race with unshadow */
		else
			*table &= ~_REGION_ENTRY_INVALID;
	} else {
		gmap_unshadow_r2t(sg, raddr);
	}
	spin_unlock(&sg->guest_table_lock);
	return rc;
out_free:
	spin_unlock(&sg->guest_table_lock);
	__free_pages(page, CRST_ALLOC_ORDER);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_r2t);
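
/*
 * gmap_shadow_r3t() and gmap_shadow_sgt() below follow the same
 * template as gmap_shadow_r2t(): allocate, install as invalid, protect
 * the source table in the parent gmap, then revalidate the entry if the
 * installed table is still in place.
 */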
/**
 * gmap_shadow_r3t - create a shadow region 3 table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @r3t: parent gmap address of the region 3 table to get shadowed
 * @fake: r3t references contiguous guest memory block, not a r3t
 *
 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t,
		    int fake)
{
	unsigned long raddr, origin, offset, len;
	unsigned long *s_r3t, *table;
	struct page *page;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	/* Allocate a shadow region third table */
	page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
	if (!page)
		return -ENOMEM;
	page->index = r3t & _REGION_ENTRY_ORIGIN;
	if (fake)
		page->index |= GMAP_SHADOW_FAKE_TABLE;
	s_r3t = (unsigned long *) page_to_phys(page);
	/* Install shadow region third table */
	spin_lock(&sg->guest_table_lock);
	table = gmap_table_walk(sg, saddr, 3); /* get region-2 pointer */
	if (!table) {
		rc = -EAGAIN;		/* Race with unshadow */
		goto out_free;
	}
	if (!(*table & _REGION_ENTRY_INVALID)) {
		rc = 0;			/* Already established */
		goto out_free;
	} else if (*table & _REGION_ENTRY_ORIGIN) {
		rc = -EAGAIN;		/* Race with shadow */
		goto out_free;
	}
	crst_table_init(s_r3t, _REGION3_ENTRY_EMPTY);
	/* mark as invalid as long as the parent table is not protected */
	*table = (unsigned long) s_r3t | _REGION_ENTRY_LENGTH |
		 _REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID;
	if (sg->edat_level >= 1)
		*table |= (r3t & _REGION_ENTRY_PROTECT);
	list_add(&page->lru, &sg->crst_list);
	if (fake) {
		/* nothing to protect for fake tables */
		*table &= ~_REGION_ENTRY_INVALID;
		spin_unlock(&sg->guest_table_lock);
		return 0;
	}
	spin_unlock(&sg->guest_table_lock);
	/* Make r3t read-only in parent gmap page table */
	raddr = (saddr & _REGION2_MASK) | _SHADOW_RMAP_REGION2;
	origin = r3t & _REGION_ENTRY_ORIGIN;
	offset = ((r3t & _REGION_ENTRY_OFFSET) >> 6) * PAGE_SIZE;
	len = ((r3t & _REGION_ENTRY_LENGTH) + 1) * PAGE_SIZE - offset;
	rc = gmap_protect_rmap(sg, raddr, origin + offset, len, PROT_READ);
	spin_lock(&sg->guest_table_lock);
	if (!rc) {
		table = gmap_table_walk(sg, saddr, 3);
		if (!table || (*table & _REGION_ENTRY_ORIGIN) !=
		    (unsigned long) s_r3t)
			rc = -EAGAIN;		/* Race with unshadow */
		else
			*table &= ~_REGION_ENTRY_INVALID;
	} else {
		gmap_unshadow_r3t(sg, raddr);
	}
	spin_unlock(&sg->guest_table_lock);
	return rc;
out_free:
	spin_unlock(&sg->guest_table_lock);
	__free_pages(page, CRST_ALLOC_ORDER);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_r3t);
/**
 * gmap_shadow_sgt - create a shadow segment table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @sgt: parent gmap address of the segment table to get shadowed
 * @fake: sgt references contiguous guest memory block, not a sgt
 *
 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt,
		    int fake)
{
	unsigned long raddr, origin, offset, len;
	unsigned long *s_sgt, *table;
	struct page *page;
	int rc;

	BUG_ON(!gmap_is_shadow(sg) || (sgt & _REGION3_ENTRY_LARGE));
	/* Allocate a shadow segment table */
	page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
	if (!page)
		return -ENOMEM;
	page->index = sgt & _REGION_ENTRY_ORIGIN;
	if (fake)
		page->index |= GMAP_SHADOW_FAKE_TABLE;
	s_sgt = (unsigned long *) page_to_phys(page);
	/* Install shadow segment table */
	spin_lock(&sg->guest_table_lock);
	table = gmap_table_walk(sg, saddr, 2); /* get region-3 pointer */
	if (!table) {
		rc = -EAGAIN;		/* Race with unshadow */
		goto out_free;
	}
	if (!(*table & _REGION_ENTRY_INVALID)) {
		rc = 0;			/* Already established */
		goto out_free;
	} else if (*table & _REGION_ENTRY_ORIGIN) {
		rc = -EAGAIN;		/* Race with shadow */
		goto out_free;
	}
	crst_table_init(s_sgt, _SEGMENT_ENTRY_EMPTY);
	/* mark as invalid as long as the parent table is not protected */
	*table = (unsigned long) s_sgt | _REGION_ENTRY_LENGTH |
		 _REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID;
	if (sg->edat_level >= 1)
		*table |= sgt & _REGION_ENTRY_PROTECT;
	list_add(&page->lru, &sg->crst_list);
	if (fake) {
		/* nothing to protect for fake tables */
		*table &= ~_REGION_ENTRY_INVALID;
		spin_unlock(&sg->guest_table_lock);
		return 0;
	}
	spin_unlock(&sg->guest_table_lock);
	/* Make sgt read-only in parent gmap page table */
	raddr = (saddr & _REGION3_MASK) | _SHADOW_RMAP_REGION3;
	origin = sgt & _REGION_ENTRY_ORIGIN;
	offset = ((sgt & _REGION_ENTRY_OFFSET) >> 6) * PAGE_SIZE;
	len = ((sgt & _REGION_ENTRY_LENGTH) + 1) * PAGE_SIZE - offset;
	rc = gmap_protect_rmap(sg, raddr, origin + offset, len, PROT_READ);
	spin_lock(&sg->guest_table_lock);
	if (!rc) {
		table = gmap_table_walk(sg, saddr, 2);
		if (!table || (*table & _REGION_ENTRY_ORIGIN) !=
		    (unsigned long) s_sgt)
			rc = -EAGAIN;		/* Race with unshadow */
		else
			*table &= ~_REGION_ENTRY_INVALID;
	} else {
		gmap_unshadow_sgt(sg, raddr);
	}
	spin_unlock(&sg->guest_table_lock);
	return rc;
out_free:
	spin_unlock(&sg->guest_table_lock);
	__free_pages(page, CRST_ALLOC_ORDER);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_sgt);
/**
 * gmap_shadow_pgt_lookup - find a shadow page table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: the address in the shadow guest address space
 * @pgt: parent gmap address of the page table to get shadowed
 * @dat_protection: if the pgtable is marked as protected by dat
 * @fake: pgt references contiguous guest memory block, not a pgtable
 *
 * Returns 0 if the shadow page table was found and -EAGAIN if the page
 * table was not found.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_pgt_lookup(struct gmap *sg, unsigned long saddr,
			   unsigned long *pgt, int *dat_protection,
			   int *fake)
{
	unsigned long *table;
	struct page *page;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	spin_lock(&sg->guest_table_lock);
	table = gmap_table_walk(sg, saddr, 1); /* get segment pointer */
	if (table && !(*table & _SEGMENT_ENTRY_INVALID)) {
		/* Shadow page tables are full pages (pte+pgste) */
		page = pfn_to_page(*table >> PAGE_SHIFT);
		*pgt = page->index & ~GMAP_SHADOW_FAKE_TABLE;
		*dat_protection = !!(*table & _SEGMENT_ENTRY_PROTECT);
		*fake = !!(page->index & GMAP_SHADOW_FAKE_TABLE);
		rc = 0;
	} else {
		rc = -EAGAIN;
	}
	spin_unlock(&sg->guest_table_lock);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_pgt_lookup);
/**
 * gmap_shadow_pgt - instantiate a shadow page table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @pgt: parent gmap address of the page table to get shadowed
 * @fake: pgt references contiguous guest memory block, not a pgtable
 *
 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with gmap->mm->mmap_sem in read
 */
int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt,
		    int fake)
{
	unsigned long raddr, origin;
	unsigned long *s_pgt, *table;
	struct page *page;
	int rc;

	BUG_ON(!gmap_is_shadow(sg) || (pgt & _SEGMENT_ENTRY_LARGE));
	/* Allocate a shadow page table */
	page = page_table_alloc_pgste(sg->mm);
	if (!page)
		return -ENOMEM;
	page->index = pgt & _SEGMENT_ENTRY_ORIGIN;
	if (fake)
		page->index |= GMAP_SHADOW_FAKE_TABLE;
	s_pgt = (unsigned long *) page_to_phys(page);
	/* Install shadow page table */
	spin_lock(&sg->guest_table_lock);
	table = gmap_table_walk(sg, saddr, 1); /* get segment pointer */
	if (!table) {
		rc = -EAGAIN;		/* Race with unshadow */
		goto out_free;
	}
	if (!(*table & _SEGMENT_ENTRY_INVALID)) {
		rc = 0;			/* Already established */
		goto out_free;
	} else if (*table & _SEGMENT_ENTRY_ORIGIN) {
		rc = -EAGAIN;		/* Race with shadow */
		goto out_free;
	}
	/* mark as invalid as long as the parent table is not protected */
	*table = (unsigned long) s_pgt | _SEGMENT_ENTRY |
		 (pgt & _SEGMENT_ENTRY_PROTECT) | _SEGMENT_ENTRY_INVALID;
	list_add(&page->lru, &sg->pt_list);
	if (fake) {
		/* nothing to protect for fake tables */
		*table &= ~_SEGMENT_ENTRY_INVALID;
		spin_unlock(&sg->guest_table_lock);
		return 0;
	}
	spin_unlock(&sg->guest_table_lock);
	/* Make pgt read-only in parent gmap page table (not the pgste) */
	raddr = (saddr & _SEGMENT_MASK) | _SHADOW_RMAP_SEGMENT;
	origin = pgt & _SEGMENT_ENTRY_ORIGIN & PAGE_MASK;
	rc = gmap_protect_rmap(sg, raddr, origin, PAGE_SIZE, PROT_READ);
	spin_lock(&sg->guest_table_lock);
	if (!rc) {
		table = gmap_table_walk(sg, saddr, 1);
		if (!table || (*table & _SEGMENT_ENTRY_ORIGIN) !=
		    (unsigned long) s_pgt)
			rc = -EAGAIN;		/* Race with unshadow */
		else
			*table &= ~_SEGMENT_ENTRY_INVALID;
	} else {
		gmap_unshadow_pgt(sg, raddr);
	}
	spin_unlock(&sg->guest_table_lock);
	return rc;
out_free:
	spin_unlock(&sg->guest_table_lock);
	page_table_free_pgste(page);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_pgt);
/**
 * gmap_shadow_page - create a shadow page mapping
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @pte: pte in parent gmap address space to get shadowed
 *
 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_page(struct gmap *sg, unsigned long saddr, pte_t pte)
{
	struct gmap *parent;
	struct gmap_rmap *rmap;
	unsigned long vmaddr, paddr;
	spinlock_t *ptl;
	pte_t *sptep, *tptep;
	int prot;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	parent = sg->parent;
	prot = (pte_val(pte) & _PAGE_PROTECT) ? PROT_READ : PROT_WRITE;

	rmap = kzalloc(sizeof(*rmap), GFP_KERNEL);
	if (!rmap)
		return -ENOMEM;
	rmap->raddr = (saddr & PAGE_MASK) | _SHADOW_RMAP_PGTABLE;

	while (1) {
		paddr = pte_val(pte) & PAGE_MASK;
		vmaddr = __gmap_translate(parent, paddr);
		if (IS_ERR_VALUE(vmaddr)) {
			rc = vmaddr;
			break;
		}
		rc = radix_tree_preload(GFP_KERNEL);
		if (rc)
			break;
		rc = -EAGAIN;
		sptep = gmap_pte_op_walk(parent, paddr, &ptl);
		if (sptep) {
			spin_lock(&sg->guest_table_lock);
			/* Get page table pointer */
			tptep = (pte_t *) gmap_table_walk(sg, saddr, 0);
			if (!tptep) {
				spin_unlock(&sg->guest_table_lock);
				gmap_pte_op_end(ptl);
				radix_tree_preload_end();
				break;
			}
			rc = ptep_shadow_pte(sg->mm, saddr, sptep, tptep, pte);
			if (rc > 0) {
				/* Success and a new mapping */
				gmap_insert_rmap(sg, vmaddr, rmap);
				rmap = NULL;
				rc = 0;
			}
			gmap_pte_op_end(ptl);
			spin_unlock(&sg->guest_table_lock);
		}
		radix_tree_preload_end();
		if (!rc)
			break;
		rc = gmap_pte_op_fixup(parent, paddr, vmaddr, prot);
		if (rc)
			break;
	}
	kfree(rmap);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_page);
/**
 * gmap_shadow_notify - handle notifications for shadow gmap
 *
 * Called with sg->parent->shadow_lock.
 */
static void gmap_shadow_notify(struct gmap *sg, unsigned long vmaddr,
			       unsigned long gaddr, pte_t *pte)
{
	struct gmap_rmap *rmap, *rnext, *head;
	unsigned long start, end, bits, raddr;

	BUG_ON(!gmap_is_shadow(sg));

	spin_lock(&sg->guest_table_lock);
	if (sg->removed) {
		spin_unlock(&sg->guest_table_lock);
		return;
	}
	/* Check for top level table */
	start = sg->orig_asce & _ASCE_ORIGIN;
	end = start + ((sg->orig_asce & _ASCE_TABLE_LENGTH) + 1) * PAGE_SIZE;
	if (!(sg->orig_asce & _ASCE_REAL_SPACE) && gaddr >= start &&
	    gaddr < end) {
		/* The complete shadow table has to go */
		gmap_unshadow(sg);
		spin_unlock(&sg->guest_table_lock);
		list_del(&sg->list);
		gmap_put(sg);
		return;
	}
	/* Remove the page table tree for one specific entry */
	head = radix_tree_delete(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT);
	gmap_for_each_rmap_safe(rmap, rnext, head) {
		bits = rmap->raddr & _SHADOW_RMAP_MASK;
		raddr = rmap->raddr ^ bits;
		switch (bits) {
		case _SHADOW_RMAP_REGION1:
			gmap_unshadow_r2t(sg, raddr);
			break;
		case _SHADOW_RMAP_REGION2:
			gmap_unshadow_r3t(sg, raddr);
			break;
		case _SHADOW_RMAP_REGION3:
			gmap_unshadow_sgt(sg, raddr);
			break;
		case _SHADOW_RMAP_SEGMENT:
			gmap_unshadow_pgt(sg, raddr);
			break;
		case _SHADOW_RMAP_PGTABLE:
			gmap_unshadow_page(sg, raddr);
			break;
		}
		kfree(rmap);
	}
	spin_unlock(&sg->guest_table_lock);
}
/**
 * ptep_notify - call all invalidation callbacks for a specific pte.
 * @mm: pointer to the process mm_struct
 * @vmaddr: virtual address in the process address space
 * @pte: pointer to the page table entry
 * @bits: bits from the pgste that caused the notify call
 *
 * This function is assumed to be called with the page table lock held
 * for the pte to notify.
 */
void ptep_notify(struct mm_struct *mm, unsigned long vmaddr,
		 pte_t *pte, unsigned long bits)
{
	unsigned long offset, gaddr = 0;
	unsigned long *table;
	struct gmap *gmap, *sg, *next;

	offset = ((unsigned long) pte) & (255 * sizeof(pte_t));
	offset = offset * (PAGE_SIZE / sizeof(pte_t));
	rcu_read_lock();
	list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
		spin_lock(&gmap->guest_table_lock);
		table = radix_tree_lookup(&gmap->host_to_guest,
					  vmaddr >> PMD_SHIFT);
		if (table)
			gaddr = __gmap_segment_gaddr(table) + offset;
		spin_unlock(&gmap->guest_table_lock);
		if (!table)
			continue;

		if (!list_empty(&gmap->children) && (bits & PGSTE_VSIE_BIT)) {
			spin_lock(&gmap->shadow_lock);
			list_for_each_entry_safe(sg, next,
						 &gmap->children, list)
				gmap_shadow_notify(sg, vmaddr, gaddr, pte);
			spin_unlock(&gmap->shadow_lock);
		}
		if (bits & PGSTE_IN_BIT)
			gmap_call_notifier(gmap, gaddr, gaddr + PAGE_SIZE - 1);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(ptep_notify);
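
/*
 * The offset calculation above maps the position of the pte within its
 * 2K page table (256 entries of 8 bytes) to the guest address offset
 * within the segment: byte offset times PAGE_SIZE/sizeof(pte_t), i.e.
 * entry index times PAGE_SIZE.
 */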
static inline void thp_split_mm(struct mm_struct *mm)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	struct vm_area_struct *vma;
	unsigned long addr;

	for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) {
		for (addr = vma->vm_start;
		     addr < vma->vm_end;
		     addr += PAGE_SIZE)
			follow_page(vma, addr, FOLL_SPLIT);
		vma->vm_flags &= ~VM_HUGEPAGE;
		vma->vm_flags |= VM_NOHUGEPAGE;
	}
	mm->def_flags |= VM_NOHUGEPAGE;
#endif
}
/*
 * Remove all empty zero pages from the mapping for lazy refaulting
 * - This must be called after mm->context.has_pgste is set, to avoid
 *   future creation of zero pages
 * - This must be called after THP was enabled
 */
static int __zap_zero_pages(pmd_t *pmd, unsigned long start,
			    unsigned long end, struct mm_walk *walk)
{
	unsigned long addr;

	for (addr = start; addr != end; addr += PAGE_SIZE) {
		pte_t *ptep;
		spinlock_t *ptl;

		ptep = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
		if (is_zero_pfn(pte_pfn(*ptep)))
			ptep_xchg_direct(walk->mm, addr, ptep, __pte(_PAGE_INVALID));
		pte_unmap_unlock(ptep, ptl);
	}
	return 0;
}

static inline void zap_zero_pages(struct mm_struct *mm)
{
	struct mm_walk walk = { .pmd_entry = __zap_zero_pages };

	walk.mm = mm;
	walk_page_range(0, TASK_SIZE, &walk);
}
/*
 * switch on pgstes for its userspace process (for kvm)
 */
int s390_enable_sie(void)
{
	struct mm_struct *mm = current->mm;

	/* Do we have pgstes? if yes, we are done */
	if (mm_has_pgste(mm))
		return 0;
	/* Fail if the page tables are 2K */
	if (!mm_alloc_pgste(mm))
		return -EINVAL;
	down_write(&mm->mmap_sem);
	mm->context.has_pgste = 1;
	/* split thp mappings and disable thp for future mappings */
	thp_split_mm(mm);
	zap_zero_pages(mm);
	up_write(&mm->mmap_sem);
	return 0;
}
EXPORT_SYMBOL_GPL(s390_enable_sie);
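
/*
 * s390_enable_sie() is the entry point used when a process starts
 * acting as a hypervisor (e.g. when KVM creates a VM): from then on all
 * page tables of the mm are allocated as full pages with pgstes.
 */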
/*
 * Enable storage key handling from now on and initialize the storage
 * keys with the default key.
 */
static int __s390_enable_skey(pte_t *pte, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	/* Clear storage key */
	ptep_zap_key(walk->mm, addr, pte);
	return 0;
}

int s390_enable_skey(void)
{
	struct mm_walk walk = { .pte_entry = __s390_enable_skey };
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int rc = 0;

	down_write(&mm->mmap_sem);
	if (mm_use_skey(mm))
		goto out_up;

	mm->context.use_skey = 1;
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (ksm_madvise(vma, vma->vm_start, vma->vm_end,
				MADV_UNMERGEABLE, &vma->vm_flags)) {
			mm->context.use_skey = 0;
			rc = -ENOMEM;
			goto out_up;
		}
	}
	mm->def_flags &= ~VM_MERGEABLE;

	walk.mm = mm;
	walk_page_range(0, TASK_SIZE, &walk);

out_up:
	up_write(&mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(s390_enable_skey);
/*
 * Reset CMMA state, make all pages stable again.
 */
static int __s390_reset_cmma(pte_t *pte, unsigned long addr,
			     unsigned long next, struct mm_walk *walk)
{
	ptep_zap_unused(walk->mm, addr, pte, 1);
	return 0;
}

void s390_reset_cmma(struct mm_struct *mm)
{
	struct mm_walk walk = { .pte_entry = __s390_reset_cmma };

	down_write(&mm->mmap_sem);
	walk.mm = mm;
	walk_page_range(0, TASK_SIZE, &walk);
	up_write(&mm->mmap_sem);
}
EXPORT_SYMBOL_GPL(s390_reset_cmma);