Merge tag 'io_uring-5.11-2021-01-16' of git://git.kernel.dk/linux-block
[linux/fpc-iii.git] / drivers / gpu / drm / msm / msm_gem.h
blobb3a0a880cbabe8a005a388e748bf3b2113107df7
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3 * Copyright (C) 2013 Red Hat
4 * Author: Rob Clark <robdclark@gmail.com>
5 */
7 #ifndef __MSM_GEM_H__
8 #define __MSM_GEM_H__
10 #include <linux/kref.h>
11 #include <linux/dma-resv.h>
12 #include "msm_drv.h"
14 /* Additional internal-use only BO flags: */
15 #define MSM_BO_STOLEN 0x10000000 /* try to use stolen/splash memory */
16 #define MSM_BO_MAP_PRIV 0x20000000 /* use IOMMU_PRIV when mapping */
18 struct msm_gem_address_space {
19 const char *name;
20 /* NOTE: mm managed at the page level, size is in # of pages
21 * and position mm_node->start is in # of pages:
23 struct drm_mm mm;
24 spinlock_t lock; /* Protects drm_mm node allocation/removal */
25 struct msm_mmu *mmu;
26 struct kref kref;
28 /* For address spaces associated with a specific process, this
29 * will be non-NULL:
31 struct pid *pid;
34 struct msm_gem_vma {
35 struct drm_mm_node node;
36 uint64_t iova;
37 struct msm_gem_address_space *aspace;
38 struct list_head list; /* node in msm_gem_object::vmas */
39 bool mapped;
40 int inuse;
43 struct msm_gem_object {
44 struct drm_gem_object base;
46 uint32_t flags;
48 /**
49 * Advice: are the backing pages purgeable?
51 uint8_t madv;
53 /**
54 * count of active vmap'ing
56 uint8_t vmap_count;
58 /* And object is either:
59 * inactive - on priv->inactive_list
60 * active - on one one of the gpu's active_list.. well, at
61 * least for now we don't have (I don't think) hw sync between
62 * 2d and 3d one devices which have both, meaning we need to
63 * block on submit if a bo is already on other ring
66 struct list_head mm_list;
68 /* Transiently in the process of submit ioctl, objects associated
69 * with the submit are on submit->bo_list.. this only lasts for
70 * the duration of the ioctl, so one bo can never be on multiple
71 * submit lists.
73 struct list_head submit_entry;
75 struct page **pages;
76 struct sg_table *sgt;
77 void *vaddr;
79 struct list_head vmas; /* list of msm_gem_vma */
81 struct llist_node freed;
83 /* For physically contiguous buffers. Used when we don't have
84 * an IOMMU. Also used for stolen/splashscreen buffer.
86 struct drm_mm_node *vram_node;
88 char name[32]; /* Identifier to print for the debugfs files */
90 int active_count;
92 #define to_msm_bo(x) container_of(x, struct msm_gem_object, base)
94 int msm_gem_mmap_obj(struct drm_gem_object *obj,
95 struct vm_area_struct *vma);
96 int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
97 uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
98 int msm_gem_get_iova(struct drm_gem_object *obj,
99 struct msm_gem_address_space *aspace, uint64_t *iova);
100 int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
101 struct msm_gem_address_space *aspace, uint64_t *iova,
102 u64 range_start, u64 range_end);
103 int msm_gem_get_and_pin_iova_locked(struct drm_gem_object *obj,
104 struct msm_gem_address_space *aspace, uint64_t *iova);
105 int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
106 struct msm_gem_address_space *aspace, uint64_t *iova);
107 uint64_t msm_gem_iova(struct drm_gem_object *obj,
108 struct msm_gem_address_space *aspace);
109 void msm_gem_unpin_iova_locked(struct drm_gem_object *obj,
110 struct msm_gem_address_space *aspace);
111 void msm_gem_unpin_iova(struct drm_gem_object *obj,
112 struct msm_gem_address_space *aspace);
113 struct page **msm_gem_get_pages(struct drm_gem_object *obj);
114 void msm_gem_put_pages(struct drm_gem_object *obj);
115 int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
116 struct drm_mode_create_dumb *args);
117 int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
118 uint32_t handle, uint64_t *offset);
119 void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj);
120 void *msm_gem_get_vaddr(struct drm_gem_object *obj);
121 void *msm_gem_get_vaddr_active(struct drm_gem_object *obj);
122 void msm_gem_put_vaddr_locked(struct drm_gem_object *obj);
123 void msm_gem_put_vaddr(struct drm_gem_object *obj);
124 int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv);
125 int msm_gem_sync_object(struct drm_gem_object *obj,
126 struct msm_fence_context *fctx, bool exclusive);
127 void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu);
128 void msm_gem_active_put(struct drm_gem_object *obj);
129 int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout);
130 int msm_gem_cpu_fini(struct drm_gem_object *obj);
131 void msm_gem_free_object(struct drm_gem_object *obj);
132 int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
133 uint32_t size, uint32_t flags, uint32_t *handle, char *name);
134 struct drm_gem_object *msm_gem_new(struct drm_device *dev,
135 uint32_t size, uint32_t flags);
136 struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
137 uint32_t size, uint32_t flags);
138 void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
139 uint32_t flags, struct msm_gem_address_space *aspace,
140 struct drm_gem_object **bo, uint64_t *iova);
141 void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
142 uint32_t flags, struct msm_gem_address_space *aspace,
143 struct drm_gem_object **bo, uint64_t *iova);
144 void msm_gem_kernel_put(struct drm_gem_object *bo,
145 struct msm_gem_address_space *aspace, bool locked);
146 struct drm_gem_object *msm_gem_import(struct drm_device *dev,
147 struct dma_buf *dmabuf, struct sg_table *sgt);
148 __printf(2, 3)
149 void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...);
150 #ifdef CONFIG_DEBUG_FS
151 void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m);
152 void msm_gem_describe_objects(struct list_head *list, struct seq_file *m);
153 #endif
155 static inline void
156 msm_gem_lock(struct drm_gem_object *obj)
158 dma_resv_lock(obj->resv, NULL);
161 static inline bool __must_check
162 msm_gem_trylock(struct drm_gem_object *obj)
164 return dma_resv_trylock(obj->resv);
167 static inline int
168 msm_gem_lock_interruptible(struct drm_gem_object *obj)
170 return dma_resv_lock_interruptible(obj->resv, NULL);
173 static inline void
174 msm_gem_unlock(struct drm_gem_object *obj)
176 dma_resv_unlock(obj->resv);
179 static inline bool
180 msm_gem_is_locked(struct drm_gem_object *obj)
182 return dma_resv_is_locked(obj->resv);
185 static inline bool is_active(struct msm_gem_object *msm_obj)
187 WARN_ON(!msm_gem_is_locked(&msm_obj->base));
188 return msm_obj->active_count;
191 static inline bool is_purgeable(struct msm_gem_object *msm_obj)
193 return (msm_obj->madv == MSM_MADV_DONTNEED) && msm_obj->sgt &&
194 !msm_obj->base.dma_buf && !msm_obj->base.import_attach;
197 static inline bool is_vunmapable(struct msm_gem_object *msm_obj)
199 WARN_ON(!msm_gem_is_locked(&msm_obj->base));
200 return (msm_obj->vmap_count == 0) && msm_obj->vaddr;
void msm_gem_purge(struct drm_gem_object *obj);
void msm_gem_vunmap(struct drm_gem_object *obj);
206 /* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,
207 * associated with the cmdstream submission for synchronization (and
208 * make it easier to unwind when things go wrong, etc). This only
209 * lasts for the duration of the submit-ioctl.
211 struct msm_gem_submit {
212 struct kref ref;
213 struct drm_device *dev;
214 struct msm_gpu *gpu;
215 struct msm_gem_address_space *aspace;
216 struct list_head node; /* node in ring submit list */
217 struct list_head bo_list;
218 struct ww_acquire_ctx ticket;
219 uint32_t seqno; /* Sequence number of the submit on the ring */
220 struct dma_fence *fence;
221 struct msm_gpu_submitqueue *queue;
222 struct pid *pid; /* submitting process */
223 bool valid; /* true if no cmdstream patching needed */
224 bool in_rb; /* "sudo" mode, copy cmds into RB */
225 struct msm_ringbuffer *ring;
226 struct msm_file_private *ctx;
227 unsigned int nr_cmds;
228 unsigned int nr_bos;
229 u32 ident; /* A "identifier" for the submit for logging */
230 struct {
231 uint32_t type;
232 uint32_t size; /* in dwords */
233 uint64_t iova;
234 uint32_t offset;/* in dwords */
235 uint32_t idx; /* cmdstream buffer idx in bos[] */
236 uint32_t nr_relocs;
237 struct drm_msm_gem_submit_reloc *relocs;
238 } *cmd; /* array of size nr_cmds */
239 struct {
240 uint32_t flags;
241 union {
242 struct msm_gem_object *obj;
243 uint32_t handle;
245 uint64_t iova;
246 } bos[];
249 void __msm_gem_submit_destroy(struct kref *kref);
251 static inline void msm_gem_submit_get(struct msm_gem_submit *submit)
253 kref_get(&submit->ref);
256 static inline void msm_gem_submit_put(struct msm_gem_submit *submit)
258 kref_put(&submit->ref, __msm_gem_submit_destroy);
261 /* helper to determine of a buffer in submit should be dumped, used for both
262 * devcoredump and debugfs cmdstream dumping:
264 static inline bool
265 should_dump(struct msm_gem_submit *submit, int idx)
267 extern bool rd_full;
268 return rd_full || (submit->bos[idx].flags & MSM_SUBMIT_BO_DUMP);
271 #endif /* __MSM_GEM_H__ */