/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */
#ifndef __I915_GEM_OBJECT_H__
#define __I915_GEM_OBJECT_H__

#include <drm/drm_gem.h>
#include <drm/drm_file.h>
#include <drm/drm_device.h>

#include "display/intel_frontbuffer.h"
#include "i915_gem_object_types.h"
#include "i915_gem_gtt.h"
#include "i915_vma_types.h"
void i915_gem_init__objects(struct drm_i915_private *i915);

struct drm_i915_gem_object *i915_gem_object_alloc(void);
void i915_gem_object_free(struct drm_i915_gem_object *obj);
void i915_gem_object_init(struct drm_i915_gem_object *obj,
                          const struct drm_i915_gem_object_ops *ops,
                          struct lock_class_key *key);
struct drm_i915_gem_object *
i915_gem_object_create_shmem(struct drm_i915_private *i915,
                             resource_size_t size);
struct drm_i915_gem_object *
i915_gem_object_create_shmem_from_data(struct drm_i915_private *i915,
                                       const void *data, resource_size_t size);
extern const struct drm_i915_gem_object_ops i915_gem_shmem_ops;
void __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
                                     struct sg_table *pages,
                                     bool needs_clflush);
int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align);

void i915_gem_flush_free_objects(struct drm_i915_private *i915);
struct sg_table *
__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj);
void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
/**
 * i915_gem_object_lookup_rcu - look up a temporary GEM object from its handle
 * @filp: DRM file private data
 * @handle: userspace handle
 *
 * Returns:
 *
 * A pointer to the object named by the handle if such exists on @filp, NULL
 * otherwise. This object is only valid whilst under the RCU read lock, and
 * note carefully the object may be in the process of being destroyed.
 */
static inline struct drm_i915_gem_object *
i915_gem_object_lookup_rcu(struct drm_file *file, u32 handle)
{
#ifdef CONFIG_LOCKDEP
        WARN_ON(debug_locks && !lock_is_held(&rcu_lock_map));
#endif
        return idr_find(&file->object_idr, handle);
}
static inline struct drm_i915_gem_object *
i915_gem_object_get_rcu(struct drm_i915_gem_object *obj)
{
        if (obj && !kref_get_unless_zero(&obj->base.refcount))
                obj = NULL;

        return obj;
}
static inline struct drm_i915_gem_object *
i915_gem_object_lookup(struct drm_file *file, u32 handle)
{
        struct drm_i915_gem_object *obj;

        rcu_read_lock();
        obj = i915_gem_object_lookup_rcu(file, handle);
        obj = i915_gem_object_get_rcu(obj);
        rcu_read_unlock();

        return obj;
}
__deprecated
struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *file, u32 handle);
__attribute__((nonnull))
static inline struct drm_i915_gem_object *
i915_gem_object_get(struct drm_i915_gem_object *obj)
{
        drm_gem_object_get(&obj->base);

        return obj;
}
__attribute__((nonnull))
static inline void
i915_gem_object_put(struct drm_i915_gem_object *obj)
{
        __drm_gem_object_put(&obj->base);
}
#define assert_object_held(obj) dma_resv_assert_held((obj)->base.resv)
static inline int __i915_gem_object_lock(struct drm_i915_gem_object *obj,
                                         struct i915_gem_ww_ctx *ww,
                                         bool intr)
{
        int ret;

        if (intr)
                ret = dma_resv_lock_interruptible(obj->base.resv, ww ? &ww->ctx : NULL);
        else
                ret = dma_resv_lock(obj->base.resv, ww ? &ww->ctx : NULL);

        if (!ret && ww)
                list_add_tail(&obj->obj_link, &ww->obj_list);
        if (ret == -EALREADY)
                ret = 0;

        if (ret == -EDEADLK)
                ww->contended = obj;

        return ret;
}
static inline int i915_gem_object_lock(struct drm_i915_gem_object *obj,
                                       struct i915_gem_ww_ctx *ww)
{
        return __i915_gem_object_lock(obj, ww, ww && ww->intr);
}
static inline int i915_gem_object_lock_interruptible(struct drm_i915_gem_object *obj,
                                                     struct i915_gem_ww_ctx *ww)
{
        WARN_ON(ww && !ww->intr);
        return __i915_gem_object_lock(obj, ww, true);
}
static inline bool i915_gem_object_trylock(struct drm_i915_gem_object *obj)
{
        return dma_resv_trylock(obj->base.resv);
}
static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
{
        dma_resv_unlock(obj->base.resv);
}
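/*
 * Example (editor's sketch, not from the upstream header): taking the object
 * lock under a ww acquire context. On -EDEADLK the contended object is
 * recorded in the context, so the caller backs off and retries; this assumes
 * the i915_gem_ww_ctx helpers (init/backoff/fini) from i915_gem_ww.h.
 *
 *	struct i915_gem_ww_ctx ww;
 *	int err;
 *
 *	i915_gem_ww_ctx_init(&ww, true);
 * retry:
 *	err = i915_gem_object_lock(obj, &ww);
 *	if (!err) {
 *		... locked section ...
 *	}
 *	if (err == -EDEADLK) {
 *		err = i915_gem_ww_ctx_backoff(&ww);
 *		if (!err)
 *			goto retry;
 *	}
 *	i915_gem_ww_ctx_fini(&ww);
 */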
struct dma_fence *
i915_gem_object_lock_fence(struct drm_i915_gem_object *obj);
void i915_gem_object_unlock_fence(struct drm_i915_gem_object *obj,
                                  struct dma_fence *fence);
static inline void
i915_gem_object_set_readonly(struct drm_i915_gem_object *obj)
{
        obj->flags |= I915_BO_READONLY;
}

static inline bool
i915_gem_object_is_readonly(const struct drm_i915_gem_object *obj)
{
        return obj->flags & I915_BO_READONLY;
}
static inline bool
i915_gem_object_is_contiguous(const struct drm_i915_gem_object *obj)
{
        return obj->flags & I915_BO_ALLOC_CONTIGUOUS;
}
static inline bool
i915_gem_object_is_volatile(const struct drm_i915_gem_object *obj)
{
        return obj->flags & I915_BO_ALLOC_VOLATILE;
}
static inline void
i915_gem_object_set_volatile(struct drm_i915_gem_object *obj)
{
        obj->flags |= I915_BO_ALLOC_VOLATILE;
}
static inline bool
i915_gem_object_type_has(const struct drm_i915_gem_object *obj,
                         unsigned long flags)
{
        return obj->ops->flags & flags;
}
static inline bool
i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
{
        return i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_STRUCT_PAGE);
}
static inline bool
i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
{
        return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_SHRINKABLE);
}
static inline bool
i915_gem_object_is_proxy(const struct drm_i915_gem_object *obj)
{
        return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_PROXY);
}
static inline bool
i915_gem_object_never_mmap(const struct drm_i915_gem_object *obj)
{
        return i915_gem_object_type_has(obj, I915_GEM_OBJECT_NO_MMAP);
}
static inline bool
i915_gem_object_needs_async_cancel(const struct drm_i915_gem_object *obj)
{
        return i915_gem_object_type_has(obj, I915_GEM_OBJECT_ASYNC_CANCEL);
}
static inline bool
i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj)
{
        return READ_ONCE(obj->frontbuffer);
}
static inline unsigned int
i915_gem_object_get_tiling(const struct drm_i915_gem_object *obj)
{
        return obj->tiling_and_stride & TILING_MASK;
}
static inline bool
i915_gem_object_is_tiled(const struct drm_i915_gem_object *obj)
{
        return i915_gem_object_get_tiling(obj) != I915_TILING_NONE;
}
static inline unsigned int
i915_gem_object_get_stride(const struct drm_i915_gem_object *obj)
{
        return obj->tiling_and_stride & STRIDE_MASK;
}
static inline unsigned int
i915_gem_tile_height(unsigned int tiling)
{
        GEM_BUG_ON(!tiling);
        return tiling == I915_TILING_Y ? 32 : 8;
}
static inline unsigned int
i915_gem_object_get_tile_height(const struct drm_i915_gem_object *obj)
{
        return i915_gem_tile_height(i915_gem_object_get_tiling(obj));
}
static inline unsigned int
i915_gem_object_get_tile_row_size(const struct drm_i915_gem_object *obj)
{
        return (i915_gem_object_get_stride(obj) *
                i915_gem_object_get_tile_height(obj));
}
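/*
 * Example (editor's note): a Y-tiled object uses a tile height of 32 rows,
 * so a 4096 byte stride gives a tile row size of 4096 * 32 = 128KiB; other
 * tiling modes use 8 rows, giving 32KiB for the same stride.
 */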
int i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
                               unsigned int tiling, unsigned int stride);
struct scatterlist *
__i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
                         struct i915_gem_object_page_iter *iter,
                         unsigned int n,
                         unsigned int *offset);
static inline struct scatterlist *
i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
                       unsigned int n,
                       unsigned int *offset)
{
        return __i915_gem_object_get_sg(obj, &obj->mm.get_page, n, offset);
}
static inline struct scatterlist *
i915_gem_object_get_sg_dma(struct drm_i915_gem_object *obj,
                           unsigned int n,
                           unsigned int *offset)
{
        return __i915_gem_object_get_sg(obj, &obj->mm.get_dma_page, n, offset);
}
struct page *
i915_gem_object_get_page(struct drm_i915_gem_object *obj,
                         unsigned int n);

struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
                               unsigned int n);

dma_addr_t
i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
                                    unsigned long n,
                                    unsigned int *len);

dma_addr_t
i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
                                unsigned long n);
void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
                                 struct sg_table *pages,
                                 unsigned int sg_page_sizes);

int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock/struct_mutex */
        I915_MM_NORMAL = 0,
        /*
         * Only used by struct_mutex, when called "recursively" from
         * direct-reclaim-esque paths. Safe because there is only ever one
         * struct_mutex in the entire system.
         */
        I915_MM_SHRINKER = 1,
        /*
         * Used for obj->mm.lock when allocating pages. Safe because the object
         * isn't yet on any LRU, and therefore the shrinker can't deadlock on
         * it. As soon as the object has pages, obj->mm.lock nests within
         * the struct_mutex.
         */
        I915_MM_GET_PAGES = 1,
};
static inline int __must_check
i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
        might_lock_nested(&obj->mm.lock, I915_MM_GET_PAGES);

        if (atomic_inc_not_zero(&obj->mm.pages_pin_count))
                return 0;

        return __i915_gem_object_get_pages(obj);
}
static inline bool
i915_gem_object_has_pages(struct drm_i915_gem_object *obj)
{
        return !IS_ERR_OR_NULL(READ_ONCE(obj->mm.pages));
}
static inline void
__i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
        GEM_BUG_ON(!i915_gem_object_has_pages(obj));

        atomic_inc(&obj->mm.pages_pin_count);
}
static inline bool
i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj)
{
        return atomic_read(&obj->mm.pages_pin_count);
}
static inline void
__i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
        GEM_BUG_ON(!i915_gem_object_has_pages(obj));
        GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

        atomic_dec(&obj->mm.pages_pin_count);
}
static inline void
i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
        __i915_gem_object_unpin_pages(obj);
}
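/*
 * Example (editor's sketch, not from the upstream header): pinning the
 * backing pages around a direct access. Every successful pin must be
 * balanced by an unpin once the access is complete.
 *
 *	err = i915_gem_object_pin_pages(obj);
 *	if (err)
 *		return err;
 *
 *	... access obj->mm.pages / i915_gem_object_get_page() ...
 *
 *	i915_gem_object_unpin_pages(obj);
 */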
int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
void i915_gem_object_writeback(struct drm_i915_gem_object *obj);
enum i915_map_type {
        I915_MAP_WB = 0,
        I915_MAP_WC,
#define I915_MAP_OVERRIDE BIT(31)
        I915_MAP_FORCE_WB = I915_MAP_WB | I915_MAP_OVERRIDE,
        I915_MAP_FORCE_WC = I915_MAP_WC | I915_MAP_OVERRIDE,
};
/**
 * i915_gem_object_pin_map - return a contiguous mapping of the entire object
 * @obj: the object to map into kernel address space
 * @type: the type of mapping, used to select pgprot_t
 *
 * Calls i915_gem_object_pin_pages() to prevent reaping of the object's
 * pages and then returns a contiguous mapping of the backing storage into
 * the kernel address space. Based on the @type of mapping, the PTE will be
 * set to either WriteBack or WriteCombine (via pgprot_t).
 *
 * The caller is responsible for calling i915_gem_object_unpin_map() when the
 * mapping is no longer required.
 *
 * Returns the pointer through which to access the mapped object, or an
 * ERR_PTR() on error.
 */
void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
                                           enum i915_map_type type);
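/*
 * Example (editor's sketch, not from the upstream header): mapping an object
 * for CPU access. pin_map() pins the pages and returns a kernel pointer;
 * unpin_map() drops the pin once the access is finished. The buffer and
 * length here are placeholders.
 *
 *	void *vaddr;
 *
 *	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *
 *	memcpy(vaddr, data, size);
 *
 *	i915_gem_object_unpin_map(obj);
 */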
void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
                                 unsigned long offset,
                                 unsigned long size);
static inline void i915_gem_object_flush_map(struct drm_i915_gem_object *obj)
{
        __i915_gem_object_flush_map(obj, 0, obj->base.size);
}
/**
 * i915_gem_object_unpin_map - releases an earlier mapping
 * @obj: the object to unmap
 *
 * After pinning the object and mapping its pages, once you are finished
 * with your access, call i915_gem_object_unpin_map() to release the pin
 * upon the mapping. Once the pin count reaches zero, that mapping may be
 * freed.
 */
static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
{
        i915_gem_object_unpin_pages(obj);
}
void __i915_gem_object_release_map(struct drm_i915_gem_object *obj);
void
i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj,
                                   unsigned int flush_domains);
int i915_gem_object_prepare_read(struct drm_i915_gem_object *obj,
                                 unsigned int *needs_clflush);
int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj,
                                  unsigned int *needs_clflush);
#define CLFLUSH_BEFORE  BIT(0)
#define CLFLUSH_AFTER   BIT(1)
#define CLFLUSH_FLAGS   (CLFLUSH_BEFORE | CLFLUSH_AFTER)
static inline void
i915_gem_object_finish_access(struct drm_i915_gem_object *obj)
{
        i915_gem_object_unpin_pages(obj);
}
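/*
 * Example (editor's sketch, not from the upstream header): CPU read access
 * via the prepare/finish helpers. prepare_read() pins the pages and reports
 * whether cache flushes are needed around the access; finish_access() drops
 * the pin again.
 *
 *	unsigned int needs_clflush;
 *
 *	err = i915_gem_object_prepare_read(obj, &needs_clflush);
 *	if (err)
 *		return err;
 *
 *	... read from the pages, flushing first if
 *	    needs_clflush & CLFLUSH_BEFORE ...
 *
 *	i915_gem_object_finish_access(obj);
 */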
static inline struct intel_engine_cs *
i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj)
{
        struct intel_engine_cs *engine = NULL;
        struct dma_fence *fence;

        rcu_read_lock();
        fence = dma_resv_get_excl_rcu(obj->base.resv);
        rcu_read_unlock();

        if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence))
                engine = to_request(fence)->engine;
        dma_fence_put(fence);

        return engine;
}
void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
                                         unsigned int cache_level);
void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj);
void i915_gem_object_flush_if_display_locked(struct drm_i915_gem_object *obj);
int __must_check
i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
struct i915_vma * __must_check
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
                                     u32 alignment,
                                     const struct i915_ggtt_view *view,
                                     unsigned int flags);
void i915_gem_object_unpin_from_display_plane(struct i915_vma *vma);
void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj);
void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj);
void i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj);
static inline bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
        if (obj->cache_dirty)
                return false;

        if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
                return true;

        /* Currently in use by HW (display engine)? Keep flushed. */
        return i915_gem_object_is_framebuffer(obj);
}
static inline void __start_cpu_write(struct drm_i915_gem_object *obj)
{
        obj->read_domains = I915_GEM_DOMAIN_CPU;
        obj->write_domain = I915_GEM_DOMAIN_CPU;
        if (cpu_write_needs_clflush(obj))
                obj->cache_dirty = true;
}
int i915_gem_object_wait(struct drm_i915_gem_object *obj,
                         unsigned int flags,
                         long timeout);
int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
                                  unsigned int flags,
                                  const struct i915_sched_attr *attr);
void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
                                         enum fb_op_origin origin);
void __i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
                                              enum fb_op_origin origin);
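/*
 * Example (editor's note, not from the upstream header): frontbuffer
 * tracking expects an invalidate when writes to a scanned-out object begin
 * and a flush once the writes have landed, using the wrappers below, e.g.
 *
 *	i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);
 *	... write to the object ...
 *	i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
 */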
static inline void
i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
                                  enum fb_op_origin origin)
{
        if (unlikely(rcu_access_pointer(obj->frontbuffer)))
                __i915_gem_object_flush_frontbuffer(obj, origin);
}
static inline void
i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
                                       enum fb_op_origin origin)
{
        if (unlikely(rcu_access_pointer(obj->frontbuffer)))
                __i915_gem_object_invalidate_frontbuffer(obj, origin);
}

#endif