/* SPDX-License-Identifier: GPL-2.0-only */

#ifndef __KVM_MM_H__
#define __KVM_MM_H__ 1

/*
 * Architectures can choose whether to use an rwlock or spinlock
 * for the mmu_lock.  These macros, for use in common code
 * only, avoid using #ifdefs in places that must deal with
 * multiple architectures.
 */

#ifdef KVM_HAVE_MMU_RWLOCK
#define KVM_MMU_LOCK_INIT(kvm)	rwlock_init(&(kvm)->mmu_lock)
#define KVM_MMU_LOCK(kvm)	write_lock(&(kvm)->mmu_lock)
#define KVM_MMU_UNLOCK(kvm)	write_unlock(&(kvm)->mmu_lock)
#else
#define KVM_MMU_LOCK_INIT(kvm)	spin_lock_init(&(kvm)->mmu_lock)
#define KVM_MMU_LOCK(kvm)	spin_lock(&(kvm)->mmu_lock)
#define KVM_MMU_UNLOCK(kvm)	spin_unlock(&(kvm)->mmu_lock)
#endif /* KVM_HAVE_MMU_RWLOCK */
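/*
 * Example (editorial sketch, not part of the upstream header): common code
 * is expected to take mmu_lock only through the wrappers above, so the same
 * call site compiles against either lock flavor.  The function below and
 * its body are hypothetical.
 *
 *	static void example_zap_all(struct kvm *kvm)
 *	{
 *		KVM_MMU_LOCK(kvm);
 *		... walk and modify MMU state under exclusive protection ...
 *		KVM_MMU_UNLOCK(kvm);
 *	}
 */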
struct kvm_follow_pfn {
	const struct kvm_memory_slot *slot;
	const gfn_t gfn;

	unsigned long hva;

	/* FOLL_* flags modifying lookup behavior, e.g. FOLL_WRITE. */
	unsigned int flags;

	/*
	 * Pin the page (effectively FOLL_PIN, which is an mm/ internal flag).
	 * The page *must* be pinned if KVM will write to the page via a kernel
	 * mapping, e.g. via kmap(), memremap(), etc.
	 */
	bool pin;

	/*
	 * If non-NULL, try to get a writable mapping even for a read fault.
	 * Set to true if a writable mapping was obtained.
	 */
	bool *map_writable;

	/*
	 * Optional output.  Set to a valid "struct page" if the returned pfn
	 * is for a refcounted or pinned struct page, NULL if the returned pfn
	 * has no struct page or if the struct page is not being refcounted
	 * (e.g. tail pages of non-compound higher order allocations from
	 * IO/PFNMAP mappings).
	 */
	struct page **refcounted_page;
};

kvm_pfn_t hva_to_pfn(struct kvm_follow_pfn *kfp);
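/*
 * Example (editorial sketch, not part of the upstream header): a caller
 * resolving a gfn for a potential write might fill in a kvm_follow_pfn as
 * below, assuming @hva must be resolved before calling hva_to_pfn() and
 * that "slot" and "gfn" come from the surrounding (hypothetical) context.
 *
 *	bool writable = false;
 *	struct page *page = NULL;
 *	struct kvm_follow_pfn kfp = {
 *		.slot = slot,
 *		.gfn = gfn,
 *		.flags = FOLL_WRITE,
 *		.map_writable = &writable,
 *		.refcounted_page = &page,
 *	};
 *	kvm_pfn_t pfn;
 *
 *	kfp.hva = __gfn_to_hva_memslot(slot, gfn);
 *	pfn = hva_to_pfn(&kfp);
 *
 * On success, "writable" reports whether a writable mapping was obtained
 * and "page" is non-NULL iff the pfn is backed by a refcounted struct page.
 */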
#ifdef CONFIG_HAVE_KVM_PFNCACHE
void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm,
				       unsigned long start,
				       unsigned long end);
#else
static inline void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm,
						     unsigned long start,
						     unsigned long end)
{
}
#endif /* CONFIG_HAVE_KVM_PFNCACHE */
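/*
 * Example (editorial sketch, not part of the upstream header): the empty
 * stub above lets the invalidation hook be called unconditionally from
 * common code, e.g. from an mmu-notifier invalidation path, and compile
 * away when CONFIG_HAVE_KVM_PFNCACHE=n.  "range" is a stand-in for the
 * notifier's invalidation range.
 *
 *	gfn_to_pfn_cache_invalidate_start(kvm, range->start, range->end);
 */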
#ifdef CONFIG_KVM_PRIVATE_MEM
void kvm_gmem_init(struct module *module);
int kvm_gmem_create(struct kvm *kvm, struct kvm_create_guest_memfd *args);
int kvm_gmem_bind(struct kvm *kvm, struct kvm_memory_slot *slot,
		  unsigned int fd, loff_t offset);
void kvm_gmem_unbind(struct kvm_memory_slot *slot);
#else
static inline void kvm_gmem_init(struct module *module)
{

}

static inline int kvm_gmem_bind(struct kvm *kvm,
				struct kvm_memory_slot *slot,
				unsigned int fd, loff_t offset)
{
	WARN_ON_ONCE(1);
	return -EIO;
}

static inline void kvm_gmem_unbind(struct kvm_memory_slot *slot)
{
	WARN_ON_ONCE(1);
}
#endif /* CONFIG_KVM_PRIVATE_MEM */
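/*
 * Example (editorial sketch, not part of the upstream header): the intended
 * guest_memfd lifecycle, with error handling elided and the memslot-update
 * context hypothetical.  The !CONFIG_KVM_PRIVATE_MEM stubs WARN because
 * they should be unreachable when the paths that lead to them are
 * compiled out.
 *
 *	On memslot creation:
 *		r = kvm_gmem_bind(kvm, slot, fd, offset);
 *	On memslot deletion:
 *		kvm_gmem_unbind(slot);
 */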
#endif /* __KVM_MM_H__ */