/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
 */

#ifndef __ASM_LOONGARCH_KVM_MMU_H__
#define __ASM_LOONGARCH_KVM_MMU_H__

#include <linux/kvm_host.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>

/*
 * KVM_MMU_CACHE_MIN_PAGES is the number of GPA page table translation levels
 * for which pages need to be cached.
 */
#define KVM_MMU_CACHE_MIN_PAGES	(CONFIG_PGTABLE_LEVELS - 1)
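
/*
 * Example: with CONFIG_PGTABLE_LEVELS == 4, a walk below the already
 * allocated root directory crosses three table levels, so up to 3
 * table pages may need to come out of the MMU page cache to map one
 * guest page.
 */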

#define _KVM_FLUSH_PGTABLE	0x1
#define _KVM_HAS_PGMASK		0x2
#define kvm_pfn_pte(pfn, prot)	(((pfn) << PFN_PTE_SHIFT) | pgprot_val(prot))
#define kvm_pte_pfn(x)		((phys_addr_t)((x & _PFN_MASK) >> PFN_PTE_SHIFT))
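
/*
 * kvm_pfn_pte() builds a pte value from a page frame number plus
 * protection bits, and kvm_pte_pfn() recovers the frame number, so
 * kvm_pte_pfn(kvm_pfn_pte(pfn, prot)) == pfn for any valid pfn.
 */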

typedef unsigned long kvm_pte_t;
typedef struct kvm_ptw_ctx kvm_ptw_ctx;
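
/*
 * Per-entry callback invoked for each entry visited during a guest
 * page table walk; see the sketch after the pte helpers below.
 */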
typedef int (*kvm_pte_ops)(kvm_pte_t *pte, phys_addr_t addr, kvm_ptw_ctx *ctx);

struct kvm_ptw_ctx {
	kvm_pte_ops	ops;
	unsigned long	flag;

	/* for kvm_arch_mmu_enable_log_dirty_pt_masked use */
	unsigned long	mask;
	unsigned long	gfn;

	/* page walk mmu info */
	unsigned int	level;
	unsigned long	pgtable_shift;
	unsigned long	invalid_entry;
	unsigned long	*invalid_ptes;
	unsigned int	*pte_shifts;
	void		*opaque;

	/* free pte table page list */
	struct list_head list;
};
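
/* Allocate the root page table for second-stage (GPA -> HPA) translation. */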
kvm_pte_t *kvm_pgd_alloc(void);
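
/*
 * WRITE_ONCE() makes the update a single non-torn store, so lockless
 * concurrent walkers observe either the old or the new entry, never a
 * partial mix of the two.
 */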
static inline void kvm_set_pte(kvm_pte_t *ptep, kvm_pte_t val)
{
	WRITE_ONCE(*ptep, val);
}

static inline int kvm_pte_write(kvm_pte_t pte) { return pte & _PAGE_WRITE; }
static inline int kvm_pte_dirty(kvm_pte_t pte) { return pte & _PAGE_DIRTY; }
static inline int kvm_pte_young(kvm_pte_t pte) { return pte & _PAGE_ACCESSED; }
static inline int kvm_pte_huge(kvm_pte_t pte) { return pte & _PAGE_HUGE; }

static inline kvm_pte_t kvm_pte_mkyoung(kvm_pte_t pte)
{
	return pte | _PAGE_ACCESSED;
}

static inline kvm_pte_t kvm_pte_mkold(kvm_pte_t pte)
{
	return pte & ~_PAGE_ACCESSED;
}

static inline kvm_pte_t kvm_pte_mkdirty(kvm_pte_t pte)
{
	return pte | _PAGE_DIRTY;
}

static inline kvm_pte_t kvm_pte_mkclean(kvm_pte_t pte)
{
	return pte & ~_PAGE_DIRTY;
}

static inline kvm_pte_t kvm_pte_mkhuge(kvm_pte_t pte)
{
	return pte | _PAGE_HUGE;
}

static inline kvm_pte_t kvm_pte_mksmall(kvm_pte_t pte)
{
	return pte & ~_PAGE_HUGE;
}
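
/*
 * Illustrative sketch only (the function name is hypothetical, not
 * part of this header): a kvm_pte_ops callback built from the helpers
 * above, clearing the accessed bit on young entries during a walk:
 *
 *	static int mkold_cb(kvm_pte_t *pte, phys_addr_t addr, kvm_ptw_ctx *ctx)
 *	{
 *		if (!kvm_pte_young(*pte))
 *			return 0;
 *		kvm_set_pte(pte, kvm_pte_mkold(*pte));
 *		return 1;
 *	}
 */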

static inline int kvm_need_flush(kvm_ptw_ctx *ctx)
{
	return ctx->flag & _KVM_FLUSH_PGTABLE;
}
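
/*
 * Index into one table page: mask the address bits selected by the
 * current level's shift down to a PTRS_PER_PTE-sized slot, much like
 * the native pXd_offset() helpers.
 */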
static inline kvm_pte_t *kvm_pgtable_offset(kvm_ptw_ctx *ctx, kvm_pte_t *table,
					phys_addr_t addr)
{
	return table + ((addr >> ctx->pgtable_shift) & (PTRS_PER_PTE - 1));
}
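
/*
 * Clamp a walk to the end of the current entry's range. Worked
 * example: with pgtable_shift == 21 (2 MiB per entry) and addr ==
 * 0x1234000, size is 0x200000 and boundary is (0x1234000 + 0x200000)
 * & ~0x1fffff == 0x1400000, the next 2 MiB boundary. The "- 1" in the
 * comparison keeps end == 0 (address space wrap) from losing to any
 * boundary.
 */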
static inline phys_addr_t kvm_pgtable_addr_end(kvm_ptw_ctx *ctx,
				phys_addr_t addr, phys_addr_t end)
{
	phys_addr_t boundary, size;

	size = 0x1UL << ctx->pgtable_shift;
	boundary = (addr + size) & ~(size - 1);
	return (boundary - 1 < end - 1) ? boundary : end;
}
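
/*
 * With no walk context, or at the leaf level (level 0), presence is
 * decided by the hardware _PAGE_PRESENT bit; at intermediate levels a
 * slot is present when it differs from that level's invalid-entry
 * pattern.
 */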
static inline int kvm_pte_present(kvm_ptw_ctx *ctx, kvm_pte_t *entry)
{
	if (!ctx || ctx->level == 0)
		return !!(*entry & _PAGE_PRESENT);

	return *entry != ctx->invalid_entry;
}

static inline int kvm_pte_none(kvm_ptw_ctx *ctx, kvm_pte_t *entry)
{
	return *entry == ctx->invalid_entry;
}
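
/*
 * kvm_ptw_enter()/kvm_ptw_exit() step the walk context one level down
 * toward the leaf, or back up, reloading the per-level table shift
 * and invalid-entry pattern on each move.
 */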
static inline void kvm_ptw_enter(kvm_ptw_ctx *ctx)
{
	ctx->level--;
	ctx->pgtable_shift = ctx->pte_shifts[ctx->level];
	ctx->invalid_entry = ctx->invalid_ptes[ctx->level];
}

static inline void kvm_ptw_exit(kvm_ptw_ctx *ctx)
{
	ctx->level++;
	ctx->pgtable_shift = ctx->pte_shifts[ctx->level];
	ctx->invalid_entry = ctx->invalid_ptes[ctx->level];
}

#endif /* __ASM_LOONGARCH_KVM_MMU_H__ */