/*
 * GEM Graphics Execution Manager Driver Interfaces
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * Copyright (c) 2009-2010, Code Aurora Forum.
 * All rights reserved.
 * Copyright © 2014 Intel Corporation
 *   Daniel Vetter <daniel.vetter@ffwll.ch>
 *
 * Author: Rickard E. (Rik) Faith <faith@valinux.com>
 * Author: Gareth Hughes <gareth@valinux.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef __DRM_GEM_H__
#define __DRM_GEM_H__
#include <linux/kref.h>
#include <linux/dma-resv.h>

#include <drm/drm_vma_manager.h>
struct drm_gem_object;
/**
 * struct drm_gem_object_funcs - GEM object functions
 */
struct drm_gem_object_funcs {
	/**
	 * @free:
	 *
	 * Destructor for drm_gem_objects.
	 *
	 * This callback is mandatory.
	 */
	void (*free)(struct drm_gem_object *obj);

	/**
	 * @open:
	 *
	 * Called upon GEM handle creation.
	 *
	 * This callback is optional.
	 */
	int (*open)(struct drm_gem_object *obj, struct drm_file *file);

	/**
	 * @close:
	 *
	 * Called upon GEM handle release.
	 *
	 * This callback is optional.
	 */
	void (*close)(struct drm_gem_object *obj, struct drm_file *file);

	/**
	 * @print_info:
	 *
	 * If driver subclasses struct &drm_gem_object, it can implement this
	 * optional hook for printing additional driver specific info.
	 *
	 * drm_printf_indent() should be used in the callback passing it the
	 * indent argument.
	 *
	 * This callback is called from drm_gem_print_info().
	 *
	 * This callback is optional.
	 */
	void (*print_info)(struct drm_printer *p, unsigned int indent,
			   const struct drm_gem_object *obj);

	/**
	 * @export:
	 *
	 * Export backing buffer as a &dma_buf.
	 * If this is not set drm_gem_prime_export() is used.
	 *
	 * This callback is optional.
	 */
	struct dma_buf *(*export)(struct drm_gem_object *obj, int flags);

	/**
	 * @pin:
	 *
	 * Pin backing buffer in memory. Used by the drm_gem_map_attach()
	 * helper.
	 *
	 * This callback is optional.
	 */
	int (*pin)(struct drm_gem_object *obj);

	/**
	 * @unpin:
	 *
	 * Unpin backing buffer. Used by the drm_gem_map_detach() helper.
	 *
	 * This callback is optional.
	 */
	void (*unpin)(struct drm_gem_object *obj);

	/**
	 * @get_sg_table:
	 *
	 * Returns a Scatter-Gather table representation of the buffer.
	 * Used when exporting a buffer by the drm_gem_map_dma_buf() helper.
	 * Releasing is done by calling dma_unmap_sg_attrs() and sg_free_table()
	 * in drm_gem_unmap_dma_buf(), therefore these helpers and this callback
	 * here cannot be used for sg tables pointing at driver private memory
	 * ranges.
	 *
	 * See also drm_prime_pages_to_sg().
	 */
	struct sg_table *(*get_sg_table)(struct drm_gem_object *obj);

	/**
	 * @vmap:
	 *
	 * Returns a virtual address for the buffer. Used by the
	 * drm_gem_dmabuf_vmap() helper.
	 *
	 * This callback is optional.
	 */
	void *(*vmap)(struct drm_gem_object *obj);

	/**
	 * @vunmap:
	 *
	 * Releases the address previously returned by @vmap. Used by the
	 * drm_gem_dmabuf_vunmap() helper.
	 *
	 * This callback is optional.
	 */
	void (*vunmap)(struct drm_gem_object *obj, void *vaddr);

	/**
	 * @mmap:
	 *
	 * Handle mmap() of the gem object, setup vma accordingly.
	 *
	 * This callback is optional.
	 *
	 * The callback is used by both drm_gem_mmap_obj() and
	 * drm_gem_prime_mmap(). When @mmap is present @vm_ops is not
	 * used, the @mmap callback must set vma->vm_ops instead.
	 */
	int (*mmap)(struct drm_gem_object *obj, struct vm_area_struct *vma);

	/**
	 * @vm_ops:
	 *
	 * Virtual memory operations used with mmap.
	 *
	 * This is optional but necessary for mmap support.
	 */
	const struct vm_operations_struct *vm_ops;
};
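/*
 * As an illustrative sketch only (the "foo" names below are hypothetical and
 * not part of this header), a driver typically provides one constant function
 * table and points &drm_gem_object.funcs at it when creating its objects:
 *
 *	static const struct drm_gem_object_funcs foo_gem_funcs = {
 *		.free = foo_gem_free_object,
 *		.print_info = foo_gem_print_info,
 *		.get_sg_table = foo_gem_get_sg_table,
 *		.vmap = foo_gem_vmap,
 *		.vunmap = foo_gem_vunmap,
 *		.vm_ops = &foo_gem_vm_ops,
 *	};
 *
 * Only @free is mandatory; the remaining hooks are filled in as the driver
 * needs them.
 */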
/**
 * struct drm_gem_object - GEM buffer object
 *
 * This structure defines the generic parts for GEM buffer objects, which are
 * mostly around handling mmap and userspace handles.
 *
 * Buffer objects are often abbreviated to BO.
 */
struct drm_gem_object {
	/**
	 * @refcount:
	 *
	 * Reference count of this object
	 *
	 * Please use drm_gem_object_get() to acquire and drm_gem_object_put()
	 * or drm_gem_object_put_unlocked() to release a reference to a GEM
	 * buffer object.
	 */
	struct kref refcount;

	/**
	 * @handle_count:
	 *
	 * This is the GEM file_priv handle count of this object.
	 *
	 * Each handle also holds a reference. Note that when the handle_count
	 * drops to 0 any global names (e.g. the id in the flink namespace) will
	 * be removed.
	 *
	 * Protected by &drm_device.object_name_lock.
	 */
	unsigned handle_count;

	/**
	 * @dev: DRM dev this object belongs to.
	 */
	struct drm_device *dev;

	/**
	 * @filp:
	 *
	 * SHMEM file node used as backing storage for swappable buffer objects.
	 * GEM also supports driver private objects with driver-specific backing
	 * storage (contiguous CMA memory, special reserved blocks). In this
	 * case @filp is NULL.
	 */
	struct file *filp;

	/**
	 * @vma_node:
	 *
	 * Mapping info for this object to support mmap. Drivers are supposed to
	 * allocate the mmap offset using drm_gem_create_mmap_offset(). The
	 * offset itself can be retrieved using drm_vma_node_offset_addr().
	 *
	 * Memory mapping itself is handled by drm_gem_mmap(), which also checks
	 * that userspace is allowed to access the object.
	 */
	struct drm_vma_offset_node vma_node;

	/**
	 * @size:
	 *
	 * Size of the object, in bytes. Immutable over the object's
	 * lifetime.
	 */
	size_t size;

	/**
	 * @name:
	 *
	 * Global name for this object, starts at 1. 0 means unnamed.
	 * Access is covered by &drm_device.object_name_lock. This is used by
	 * the GEM_FLINK and GEM_OPEN ioctls.
	 */
	int name;

	/**
	 * @dma_buf:
	 *
	 * dma-buf associated with this GEM object.
	 *
	 * Pointer to the dma-buf associated with this gem object (either
	 * through importing or exporting). We break the resulting reference
	 * loop when the last gem handle for this object is released.
	 *
	 * Protected by &drm_device.object_name_lock.
	 */
	struct dma_buf *dma_buf;

	/**
	 * @import_attach:
	 *
	 * dma-buf attachment backing this object.
	 *
	 * Any foreign dma_buf imported as a gem object has this set to the
	 * attachment point for the device. This is invariant over the lifetime
	 * of a gem object.
	 *
	 * The &drm_driver.gem_free_object callback is responsible for cleaning
	 * up the dma_buf attachment and references acquired at import time.
	 *
	 * Note that the drm gem/prime core does not depend upon drivers setting
	 * this field any more. So for drivers where this doesn't make sense
	 * (e.g. virtual devices or a displaylink behind a USB bus) they can
	 * simply leave it as NULL.
	 */
	struct dma_buf_attachment *import_attach;

	/**
	 * @resv:
	 *
	 * Pointer to reservation object associated with this GEM object.
	 *
	 * Normally (@resv == &@_resv) except for imported GEM objects.
	 */
	struct dma_resv *resv;

	/**
	 * @_resv:
	 *
	 * A reservation object for this GEM object.
	 *
	 * This is unused for imported GEM objects.
	 */
	struct dma_resv _resv;

	/**
	 * @funcs:
	 *
	 * Optional GEM object functions. If this is set, it will be used
	 * instead of the corresponding &drm_driver GEM callbacks.
	 *
	 * New drivers should use this.
	 */
	const struct drm_gem_object_funcs *funcs;
};
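/*
 * Drivers usually embed &struct drm_gem_object in a larger, driver-specific
 * buffer object. A minimal sketch (struct foo_bo and to_foo_bo() are
 * hypothetical, not part of this header):
 *
 *	struct foo_bo {
 *		struct drm_gem_object base;
 *		void *vaddr;
 *	};
 *
 *	static inline struct foo_bo *to_foo_bo(struct drm_gem_object *obj)
 *	{
 *		return container_of(obj, struct foo_bo, base);
 *	}
 *
 * The GEM core only ever deals in &struct drm_gem_object pointers; the driver
 * converts back to its own type with container_of() in its callbacks.
 */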
/**
 * DEFINE_DRM_GEM_FOPS() - macro to generate file operations for GEM drivers
 * @name: name for the generated structure
 *
 * This macro autogenerates a suitable &struct file_operations for GEM based
 * drivers, which can be assigned to &drm_driver.fops. Note that this structure
 * cannot be shared between drivers, because it contains a reference to the
 * current module using THIS_MODULE.
 *
 * Note that the declaration is already marked as static - if you need a
 * non-static version of this you're probably doing it wrong and will break the
 * THIS_MODULE reference by accident.
 */
#define DEFINE_DRM_GEM_FOPS(name) \
	static const struct file_operations name = {\
		.owner = THIS_MODULE,\
		.open = drm_open,\
		.release = drm_release,\
		.unlocked_ioctl = drm_ioctl,\
		.compat_ioctl = drm_compat_ioctl,\
		.poll = drm_poll,\
		.read = drm_read,\
		.llseek = noop_llseek,\
		.mmap = drm_gem_mmap,\
	}
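/*
 * Usage sketch for DEFINE_DRM_GEM_FOPS(), assuming a hypothetical "foo"
 * driver (the foo_* names are illustrative only):
 *
 *	DEFINE_DRM_GEM_FOPS(foo_fops);
 *
 *	static struct drm_driver foo_driver = {
 *		...
 *		.fops = &foo_fops,
 *	};
 */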
void drm_gem_object_release(struct drm_gem_object *obj);
void drm_gem_object_free(struct kref *kref);
int drm_gem_object_init(struct drm_device *dev,
			struct drm_gem_object *obj, size_t size);
void drm_gem_private_object_init(struct drm_device *dev,
				 struct drm_gem_object *obj, size_t size);
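/*
 * A rough object-creation sketch, reusing the hypothetical struct foo_bo and
 * foo_gem_funcs from the comments above: allocate the driver structure, then
 * initialize the embedded GEM object. drm_gem_object_init() sets up shmem
 * backing storage, drm_gem_private_object_init() does not; size is expected
 * to be page aligned:
 *
 *	struct foo_bo *bo = kzalloc(sizeof(*bo), GFP_KERNEL);
 *	int ret;
 *
 *	if (!bo)
 *		return ERR_PTR(-ENOMEM);
 *	bo->base.funcs = &foo_gem_funcs;
 *	ret = drm_gem_object_init(dev, &bo->base, size);
 *	if (ret) {
 *		kfree(bo);
 *		return ERR_PTR(ret);
 *	}
 */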
void drm_gem_vm_open(struct vm_area_struct *vma);
void drm_gem_vm_close(struct vm_area_struct *vma);
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
		     struct vm_area_struct *vma);
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
/**
 * drm_gem_object_get - acquire a GEM buffer object reference
 * @obj: GEM buffer object
 *
 * This function acquires an additional reference to @obj. It is illegal to
 * call this without already holding a reference. No locks required.
 */
static inline void drm_gem_object_get(struct drm_gem_object *obj)
{
	kref_get(&obj->refcount);
}
/**
 * __drm_gem_object_put - raw function to release a GEM buffer object reference
 * @obj: GEM buffer object
 *
 * This function is meant to be used by drivers which are not encumbered with
 * &drm_device.struct_mutex legacy locking and which are using the
 * gem_free_object_unlocked callback. It avoids all the locking checks and
 * locking overhead of drm_gem_object_put() and drm_gem_object_put_unlocked().
 *
 * Drivers should never call this directly in their code. Instead they should
 * wrap it up into a ``driver_gem_object_put(struct driver_gem_object *obj)``
 * wrapper function, and use that. Shared code should never call this, to
 * avoid breaking drivers by accident which still depend upon
 * &drm_device.struct_mutex locking.
 */
static inline void
__drm_gem_object_put(struct drm_gem_object *obj)
{
	kref_put(&obj->refcount, drm_gem_object_free);
}
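/*
 * Per the note above, drivers are expected to wrap __drm_gem_object_put()
 * rather than call it directly. A minimal sketch using the hypothetical
 * struct foo_bo from earlier (not part of this header):
 *
 *	static inline void foo_bo_put(struct foo_bo *bo)
 *	{
 *		__drm_gem_object_put(&bo->base);
 *	}
 */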
void drm_gem_object_put_unlocked(struct drm_gem_object *obj);
void drm_gem_object_put(struct drm_gem_object *obj);
int drm_gem_handle_create(struct drm_file *file_priv,
			  struct drm_gem_object *obj,
			  u32 *handlep);
int drm_gem_handle_delete(struct drm_file *filp, u32 handle);
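/*
 * Handle creation is typically the last step of a buffer-creation ioctl such
 * as &drm_driver.dumb_create. Sketch, with foo_bo_create() as a hypothetical
 * driver helper:
 *
 *	struct foo_bo *bo = foo_bo_create(dev, args->size);
 *	int ret;
 *
 *	if (IS_ERR(bo))
 *		return PTR_ERR(bo);
 *	ret = drm_gem_handle_create(file_priv, &bo->base, &args->handle);
 *	drm_gem_object_put_unlocked(&bo->base);
 *	return ret;
 *
 * The put drops the allocation reference; the userspace handle now holds its
 * own reference to the object.
 */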
void drm_gem_free_mmap_offset(struct drm_gem_object *obj);
int drm_gem_create_mmap_offset(struct drm_gem_object *obj);
int drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size);
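/*
 * A driver's map-offset ioctl usually pairs these helpers with
 * drm_vma_node_offset_addr(). Sketch, with illustrative variable names:
 *
 *	ret = drm_gem_create_mmap_offset(obj);
 *	if (ret)
 *		return ret;
 *	*offset = drm_vma_node_offset_addr(&obj->vma_node);
 *
 * drm_gem_dumb_map_offset() below implements this pattern for dumb buffers.
 */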
struct page **drm_gem_get_pages(struct drm_gem_object *obj);
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
		bool dirty, bool accessed);
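/*
 * For shmem-backed objects a driver can use these helpers to populate and
 * release the page array, e.g. from its pin/unpin hooks or fault handling
 * code. Sketch (where the driver stores the returned array is up to it):
 *
 *	struct page **pages = drm_gem_get_pages(obj);
 *
 *	if (IS_ERR(pages))
 *		return PTR_ERR(pages);
 *	...
 *	drm_gem_put_pages(obj, pages, true, true);
 */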
int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
			   int count, struct drm_gem_object ***objs_out);
struct drm_gem_object *drm_gem_object_lookup(struct drm_file *filp, u32 handle);
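/*
 * The usual ioctl pattern is to look up the object by handle, use it, and
 * drop the lookup reference again. Sketch:
 *
 *	struct drm_gem_object *obj;
 *
 *	obj = drm_gem_object_lookup(file_priv, args->handle);
 *	if (!obj)
 *		return -ENOENT;
 *	... operate on obj ...
 *	drm_gem_object_put_unlocked(obj);
 */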
long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle,
			   bool wait_all, unsigned long timeout);
int drm_gem_lock_reservations(struct drm_gem_object **objs, int count,
			      struct ww_acquire_ctx *acquire_ctx);
void drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
				 struct ww_acquire_ctx *acquire_ctx);
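/*
 * These two helpers take and drop the dma_resv locks of a whole array of
 * objects under one ww_acquire_ctx to avoid deadlocks, e.g. around job
 * submission. Sketch:
 *
 *	struct ww_acquire_ctx ctx;
 *	int ret;
 *
 *	ret = drm_gem_lock_reservations(objs, count, &ctx);
 *	if (ret)
 *		return ret;
 *	... queue the job, add fences ...
 *	drm_gem_unlock_reservations(objs, count, &ctx);
 */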
int drm_gem_fence_array_add(struct xarray *fence_array,
			    struct dma_fence *fence);
int drm_gem_fence_array_add_implicit(struct xarray *fence_array,
				     struct drm_gem_object *obj,
				     bool write);
int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
			    u32 handle, u64 *offset);
int drm_gem_dumb_destroy(struct drm_file *file,
			 struct drm_device *dev,
			 uint32_t handle);
#endif /* __DRM_GEM_H__ */