/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#ifndef __I915_GEM_OBJECT_H__
#define __I915_GEM_OBJECT_H__

#include <drm/drm_gem.h>
#include <drm/drm_file.h>
#include <drm/drm_device.h>

#include <drm/i915_drm.h>

#include "display/intel_frontbuffer.h"
#include "i915_gem_object_types.h"
#include "i915_gem_gtt.h"
#include "i915_vma_types.h"

void i915_gem_init__objects(struct drm_i915_private *i915);

struct drm_i915_gem_object *i915_gem_object_alloc(void);
void i915_gem_object_free(struct drm_i915_gem_object *obj);

void i915_gem_object_init(struct drm_i915_gem_object *obj,
			  const struct drm_i915_gem_object_ops *ops,
			  struct lock_class_key *key);
struct drm_i915_gem_object *
i915_gem_object_create_shmem(struct drm_i915_private *i915,
			     resource_size_t size);
struct drm_i915_gem_object *
i915_gem_object_create_shmem_from_data(struct drm_i915_private *i915,
				       const void *data, resource_size_t size);

extern const struct drm_i915_gem_object_ops i915_gem_shmem_ops;
void __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
				     struct sg_table *pages,
				     bool needs_clflush);

int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align);

void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file);
void i915_gem_free_object(struct drm_gem_object *obj);

void i915_gem_flush_free_objects(struct drm_i915_private *i915);

struct sg_table *
__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj);
void i915_gem_object_truncate(struct drm_i915_gem_object *obj);

/**
 * i915_gem_object_lookup_rcu - look up a temporary GEM object from its handle
 * @file: DRM file private data
 * @handle: userspace handle
 *
 * Returns:
 *
 * A pointer to the object named by the handle if such exists on @file, NULL
 * otherwise. This object is only valid whilst under the RCU read lock, and
 * note carefully the object may be in the process of being destroyed.
 */
static inline struct drm_i915_gem_object *
i915_gem_object_lookup_rcu(struct drm_file *file, u32 handle)
{
#ifdef CONFIG_LOCKDEP
	WARN_ON(debug_locks && !lock_is_held(&rcu_lock_map));
#endif
	return idr_find(&file->object_idr, handle);
}

static inline struct drm_i915_gem_object *
i915_gem_object_lookup(struct drm_file *file, u32 handle)
{
	struct drm_i915_gem_object *obj;

	rcu_read_lock();
	obj = i915_gem_object_lookup_rcu(file, handle);
	if (obj && !kref_get_unless_zero(&obj->base.refcount))
		obj = NULL;
	rcu_read_unlock();

	return obj;
}
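
/*
 * Example usage (illustrative sketch, not part of the original header):
 * the typical lookup/use/release sequence, pairing the lookup above with
 * i915_gem_object_put() declared below. "file" and "handle" stand in for
 * the caller's DRM file and userspace handle.
 *
 *	struct drm_i915_gem_object *obj;
 *
 *	obj = i915_gem_object_lookup(file, handle);
 *	if (!obj)
 *		return -ENOENT;
 *
 *	... operate on obj, which now holds a reference ...
 *
 *	i915_gem_object_put(obj);
 */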

__deprecated
struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *file, u32 handle);

__attribute__((nonnull))
static inline struct drm_i915_gem_object *
i915_gem_object_get(struct drm_i915_gem_object *obj)
{
	drm_gem_object_get(&obj->base);

	return obj;
}

__attribute__((nonnull))
static inline void
i915_gem_object_put(struct drm_i915_gem_object *obj)
{
	__drm_gem_object_put(&obj->base);
}

#define assert_object_held(obj) dma_resv_assert_held((obj)->base.resv)

static inline void i915_gem_object_lock(struct drm_i915_gem_object *obj)
{
	dma_resv_lock(obj->base.resv, NULL);
}

static inline bool i915_gem_object_trylock(struct drm_i915_gem_object *obj)
{
	return dma_resv_trylock(obj->base.resv);
}

static inline int
i915_gem_object_lock_interruptible(struct drm_i915_gem_object *obj)
{
	return dma_resv_lock_interruptible(obj->base.resv, NULL);
}

static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
{
	dma_resv_unlock(obj->base.resv);
}
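
/*
 * Example usage (illustrative sketch, not part of the original header):
 * take the object lock around a state update, with assert_object_held()
 * documenting the locking requirement inside a helper. "frob_locked" is
 * a hypothetical name.
 *
 *	static void frob_locked(struct drm_i915_gem_object *obj)
 *	{
 *		assert_object_held(obj);
 *		... mutate state guarded by the object's dma_resv lock ...
 *	}
 *
 *	i915_gem_object_lock(obj);
 *	frob_locked(obj);
 *	i915_gem_object_unlock(obj);
 */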

struct dma_fence *
i915_gem_object_lock_fence(struct drm_i915_gem_object *obj);
void i915_gem_object_unlock_fence(struct drm_i915_gem_object *obj,
				  struct dma_fence *fence);

static inline void
i915_gem_object_set_readonly(struct drm_i915_gem_object *obj)
{
	obj->flags |= I915_BO_READONLY;
}

static inline bool
i915_gem_object_is_readonly(const struct drm_i915_gem_object *obj)
{
	return obj->flags & I915_BO_READONLY;
}

static inline bool
i915_gem_object_is_contiguous(const struct drm_i915_gem_object *obj)
{
	return obj->flags & I915_BO_ALLOC_CONTIGUOUS;
}

static inline bool
i915_gem_object_is_volatile(const struct drm_i915_gem_object *obj)
{
	return obj->flags & I915_BO_ALLOC_VOLATILE;
}

static inline void
i915_gem_object_set_volatile(struct drm_i915_gem_object *obj)
{
	obj->flags |= I915_BO_ALLOC_VOLATILE;
}

static inline bool
i915_gem_object_type_has(const struct drm_i915_gem_object *obj,
			 unsigned long flags)
{
	return obj->ops->flags & flags;
}

static inline bool
i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_STRUCT_PAGE);
}

static inline bool
i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_SHRINKABLE);
}

static inline bool
i915_gem_object_is_proxy(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_PROXY);
}

static inline bool
i915_gem_object_never_bind_ggtt(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_NO_GGTT);
}

static inline bool
i915_gem_object_needs_async_cancel(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_ASYNC_CANCEL);
}

static inline bool
i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj)
{
	return READ_ONCE(obj->frontbuffer);
}

static inline unsigned int
i915_gem_object_get_tiling(const struct drm_i915_gem_object *obj)
{
	return obj->tiling_and_stride & TILING_MASK;
}

static inline bool
i915_gem_object_is_tiled(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_get_tiling(obj) != I915_TILING_NONE;
}

static inline unsigned int
i915_gem_object_get_stride(const struct drm_i915_gem_object *obj)
{
	return obj->tiling_and_stride & STRIDE_MASK;
}

static inline unsigned int
i915_gem_tile_height(unsigned int tiling)
{
	GEM_BUG_ON(!tiling);
	return tiling == I915_TILING_Y ? 32 : 8;
}

static inline unsigned int
i915_gem_object_get_tile_height(const struct drm_i915_gem_object *obj)
{
	return i915_gem_tile_height(i915_gem_object_get_tiling(obj));
}

static inline unsigned int
i915_gem_object_get_tile_row_size(const struct drm_i915_gem_object *obj)
{
	return (i915_gem_object_get_stride(obj) *
		i915_gem_object_get_tile_height(obj));
}
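
/*
 * Worked example (illustrative, not from the original header): for an
 * X-tiled object with a 4096-byte stride, i915_gem_tile_height() gives
 * 8 rows per tile, so i915_gem_object_get_tile_row_size() returns
 * 4096 * 8 = 32768 bytes per row of tiles; a Y-tiled object with the
 * same stride spans 4096 * 32 = 131072 bytes per row.
 */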

int i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
			       unsigned int tiling, unsigned int stride);

struct scatterlist *
i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
		       unsigned int n, unsigned int *offset);

struct page *
i915_gem_object_get_page(struct drm_i915_gem_object *obj,
			 unsigned int n);

struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
			       unsigned int n);

dma_addr_t
i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
				    unsigned long n,
				    unsigned int *len);

dma_addr_t
i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
				unsigned long n);

void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
				 struct sg_table *pages,
				 unsigned int sg_page_sizes);

int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);

enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock/struct_mutex */
	I915_MM_NORMAL = 0,
	/*
	 * Only used by struct_mutex, when called "recursively" from
	 * direct-reclaim-esque paths. Safe because there is only ever one
	 * struct_mutex in the entire system.
	 */
	I915_MM_SHRINKER = 1,
	/*
	 * Used for obj->mm.lock when allocating pages. Safe because the object
	 * isn't yet on any LRU, and therefore the shrinker can't deadlock on
	 * it. As soon as the object has pages, obj->mm.lock nests within
	 * the struct_mutex.
	 */
	I915_MM_GET_PAGES = 1,
};

static inline int __must_check
i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
	might_lock_nested(&obj->mm.lock, I915_MM_GET_PAGES);

	if (atomic_inc_not_zero(&obj->mm.pages_pin_count))
		return 0;

	return __i915_gem_object_get_pages(obj);
}

static inline bool
i915_gem_object_has_pages(struct drm_i915_gem_object *obj)
{
	return !IS_ERR_OR_NULL(READ_ONCE(obj->mm.pages));
}

static inline void
__i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
	atomic_inc(&obj->mm.pages_pin_count);
}

static inline bool
i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj)
{
	return atomic_read(&obj->mm.pages_pin_count);
}

static inline void
__i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
	atomic_dec(&obj->mm.pages_pin_count);
}

static inline void
i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
	__i915_gem_object_unpin_pages(obj);
}
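
/*
 * Example usage (illustrative sketch, not from the original header):
 * keep the backing store resident while it is being accessed, then drop
 * the pin. Error handling beyond the pin itself is elided.
 *
 *	int err;
 *
 *	err = i915_gem_object_pin_pages(obj);
 *	if (err)
 *		return err;
 *
 *	... access obj->mm.pages; the shrinker cannot reap them ...
 *
 *	i915_gem_object_unpin_pages(obj);
 */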

int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
void i915_gem_object_writeback(struct drm_i915_gem_object *obj);

enum i915_map_type {
	I915_MAP_WB = 0,
	I915_MAP_WC,
#define I915_MAP_OVERRIDE BIT(31)
	I915_MAP_FORCE_WB = I915_MAP_WB | I915_MAP_OVERRIDE,
	I915_MAP_FORCE_WC = I915_MAP_WC | I915_MAP_OVERRIDE,
};

/**
 * i915_gem_object_pin_map - return a contiguous mapping of the entire object
 * @obj: the object to map into kernel address space
 * @type: the type of mapping, used to select pgprot_t
 *
 * Calls i915_gem_object_pin_pages() to prevent reaping of the object's
 * pages and then returns a contiguous mapping of the backing storage into
 * the kernel address space. Based on the @type of mapping, the PTE will be
 * set to either WriteBack or WriteCombine (via pgprot_t).
 *
 * The caller is responsible for calling i915_gem_object_unpin_map() when the
 * mapping is no longer required.
 *
 * Returns the pointer through which to access the mapped object, or an
 * ERR_PTR() on error.
 */
void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
					   enum i915_map_type type);

void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
				 unsigned long offset,
				 unsigned long size);
static inline void i915_gem_object_flush_map(struct drm_i915_gem_object *obj)
{
	__i915_gem_object_flush_map(obj, 0, obj->base.size);
}

/**
 * i915_gem_object_unpin_map - releases an earlier mapping
 * @obj: the object to unmap
 *
 * After pinning the object and mapping its pages, once you are finished
 * with your access, call i915_gem_object_unpin_map() to release the pin
 * upon the mapping. Once the pin count reaches zero, that mapping may be
 * freed.
 */
static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
{
	i915_gem_object_unpin_pages(obj);
}
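
/*
 * Example usage (illustrative sketch, not from the original header):
 * CPU-write a payload through a WriteBack mapping, flush it for GPU
 * consumption, then release the mapping. "data" and "size" are
 * hypothetical.
 *
 *	void *vaddr;
 *
 *	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *
 *	memcpy(vaddr, data, size);
 *	i915_gem_object_flush_map(obj);
 *	i915_gem_object_unpin_map(obj);
 */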

void
i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj,
				   unsigned int flush_domains);

int i915_gem_object_prepare_read(struct drm_i915_gem_object *obj,
				 unsigned int *needs_clflush);
int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj,
				  unsigned int *needs_clflush);
#define CLFLUSH_BEFORE	BIT(0)
#define CLFLUSH_AFTER	BIT(1)
#define CLFLUSH_FLAGS	(CLFLUSH_BEFORE | CLFLUSH_AFTER)

static inline void
i915_gem_object_finish_access(struct drm_i915_gem_object *obj)
{
	i915_gem_object_unpin_pages(obj);
	i915_gem_object_unlock(obj);
}
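
/*
 * Example usage (illustrative sketch, not from the original header):
 * prepare the object for CPU writes, honouring the clflush hints, then
 * finish the access, which also drops the pin and lock taken during
 * preparation.
 *
 *	unsigned int needs_clflush;
 *	int err;
 *
 *	err = i915_gem_object_prepare_write(obj, &needs_clflush);
 *	if (err)
 *		return err;
 *
 *	... write to the pages, flushing before and/or after as directed
 *	    by needs_clflush & CLFLUSH_BEFORE / CLFLUSH_AFTER ...
 *
 *	i915_gem_object_finish_access(obj);
 */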

static inline struct intel_engine_cs *
i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj)
{
	struct intel_engine_cs *engine = NULL;
	struct dma_fence *fence;

	rcu_read_lock();
	fence = dma_resv_get_excl_rcu(obj->base.resv);
	rcu_read_unlock();

	if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence))
		engine = to_request(fence)->engine;
	dma_fence_put(fence);

	return engine;
}

void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
					 unsigned int cache_level);
void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj);

int __must_check
i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);

struct i915_vma * __must_check
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
				     u32 alignment,
				     const struct i915_ggtt_view *view,
				     unsigned int flags);
void i915_gem_object_unpin_from_display_plane(struct i915_vma *vma);

void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj);
void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj);
void i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj);

static inline bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
	if (obj->cache_dirty)
		return false;

	if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
		return true;

	/* Currently in use by HW (display engine)? Keep flushed. */
	return i915_gem_object_is_framebuffer(obj);
}

static inline void __start_cpu_write(struct drm_i915_gem_object *obj)
{
	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->write_domain = I915_GEM_DOMAIN_CPU;
	if (cpu_write_needs_clflush(obj))
		obj->cache_dirty = true;
}

int i915_gem_object_wait(struct drm_i915_gem_object *obj,
			 unsigned int flags,
			 long timeout);
int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
				  unsigned int flags,
				  const struct i915_sched_attr *attr);

void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
					 enum fb_op_origin origin);
void __i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
					      enum fb_op_origin origin);

static inline void
i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
				  enum fb_op_origin origin)
{
	if (unlikely(rcu_access_pointer(obj->frontbuffer)))
		__i915_gem_object_flush_frontbuffer(obj, origin);
}

static inline void
i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
				       enum fb_op_origin origin)
{
	if (unlikely(rcu_access_pointer(obj->frontbuffer)))
		__i915_gem_object_invalidate_frontbuffer(obj, origin);
}

#endif /* __I915_GEM_OBJECT_H__ */