/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2018 Intel Corporation
 */

#include "i915_gem_batch_pool.h"
#include "i915_drv.h"

/**
 * DOC: batch pool
 *
 * In order to submit batch buffers as 'secure', the software command parser
 * must ensure that a batch buffer cannot be modified after parsing. It does
 * this by copying the user provided batch buffer contents to a kernel owned
 * buffer from which the hardware will actually execute, and by carefully
 * managing the address space bindings for such buffers.
 *
 * The batch pool framework provides a mechanism for the driver to manage a
 * set of scratch buffers to use for this purpose. The framework can be
 * extended to support other use cases should they arise.
 */
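
/*
 * Lifecycle sketch (illustrative addition, not from the original file):
 * each engine is assumed to embed one pool as engine->batch_pool, with
 * init/fini on the engine setup/teardown paths and _get() on the
 * submission path:
 *
 *	i915_gem_batch_pool_init(&engine->batch_pool, engine);
 *	...
 *	obj = i915_gem_batch_pool_get(&engine->batch_pool, size);
 *	... (fill, submit, then unpin obj) ...
 *	i915_gem_batch_pool_fini(&engine->batch_pool);
 */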

/**
 * i915_gem_batch_pool_init() - initialize a batch buffer pool
 * @pool: the batch buffer pool
 * @engine: the associated request submission engine
 */
void i915_gem_batch_pool_init(struct i915_gem_batch_pool *pool,
			      struct intel_engine_cs *engine)
{
	int n;

	pool->engine = engine;

	for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
		INIT_LIST_HEAD(&pool->cache_list[n]);
}

/**
 * i915_gem_batch_pool_fini() - clean up a batch buffer pool
 * @pool: the pool to clean up
 *
 * Note: Callers must hold the struct_mutex.
 */
void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool)
{
	int n;

	lockdep_assert_held(&pool->engine->i915->drm.struct_mutex);

	for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) {
		struct drm_i915_gem_object *obj, *next;

		list_for_each_entry_safe(obj, next,
					 &pool->cache_list[n],
					 batch_pool_link)
			__i915_gem_object_release_unless_active(obj);

		INIT_LIST_HEAD(&pool->cache_list[n]);
	}
}

/**
 * i915_gem_batch_pool_get() - allocate a buffer from the pool
 * @pool: the batch buffer pool
 * @size: the minimum desired size of the returned buffer
 *
 * Returns an inactive buffer from @pool with at least @size bytes,
 * with the pages pinned. The caller must i915_gem_object_unpin_pages()
 * on the returned object.
 *
 * Note: Callers must hold the struct_mutex.
 *
 * Return: the buffer object or an error pointer
 */
struct drm_i915_gem_object *
i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
			size_t size)
{
	struct drm_i915_gem_object *obj;
	struct list_head *list;
	int n, ret;

	lockdep_assert_held(&pool->engine->i915->drm.struct_mutex);

	/* Compute a power-of-two bucket, but throw everything greater than
	 * 16KiB into the same bucket: i.e. the buckets hold objects of
	 * (1 page, 2 pages, 4 pages, 8+ pages).
	 */
	n = fls(size >> PAGE_SHIFT) - 1;
	if (n >= ARRAY_SIZE(pool->cache_list))
		n = ARRAY_SIZE(pool->cache_list) - 1;
	list = &pool->cache_list[n];
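
	/*
	 * Worked examples of the bucketing above (illustrative addition,
	 * assuming 4KiB pages, i.e. PAGE_SHIFT == 12, and a four-entry
	 * cache_list):
	 *
	 *	 4KiB: fls(1) - 1 = 0 -> bucket 0 (1 page)
	 *	 8KiB: fls(2) - 1 = 1 -> bucket 1 (2 pages)
	 *	16KiB: fls(4) - 1 = 2 -> bucket 2 (4 pages)
	 *	32KiB: fls(8) - 1 = 3 -> bucket 3 (8+ pages)
	 *	64KiB: fls(16) - 1 = 4, clamped to bucket 3
	 */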

	list_for_each_entry(obj, list, batch_pool_link) {
		/* The batches are strictly LRU ordered */
		if (i915_gem_object_is_active(obj)) {
			struct reservation_object *resv = obj->resv;

			/*
			 * The list is LRU ordered, so if the oldest busy
			 * object has not yet signaled, nothing after it
			 * can be idle either; stop scanning.
			 */
			if (!reservation_object_test_signaled_rcu(resv, true))
				break;

			i915_retire_requests(pool->engine->i915);
			GEM_BUG_ON(i915_gem_object_is_active(obj));

			/*
			 * The object is now idle; clear the array of shared
			 * fences before we add a new request. Although we
			 * remain on the same engine, we may be on a different
			 * timeline and so may continually grow the array,
			 * trapping a reference to all the old fences, rather
			 * than replace the existing fence.
			 */
			if (rcu_access_pointer(resv->fence)) {
				reservation_object_lock(resv, NULL);
				reservation_object_add_excl_fence(resv, NULL);
				reservation_object_unlock(resv);
			}
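			/*
			 * Note (illustrative addition): installing an
			 * exclusive fence, even a NULL one, drops every
			 * shared fence slot from the reservation object,
			 * which is what empties the array here.
			 */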
		}

		GEM_BUG_ON(!reservation_object_test_signaled_rcu(obj->resv,
								 true));

		if (obj->base.size >= size)
			goto found;
	}

	obj = i915_gem_object_create_internal(pool->engine->i915, size);
	if (IS_ERR(obj))
		return obj;

found:
	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		return ERR_PTR(ret);

	list_move_tail(&obj->batch_pool_link, list);
	return obj;
}
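
/*
 * Minimal usage sketch (illustrative addition, modelled on the command
 * parser path; batch_len and the copy/parse step are placeholders):
 *
 *	struct drm_i915_gem_object *shadow;
 *
 *	shadow = i915_gem_batch_pool_get(&engine->batch_pool,
 *					 PAGE_ALIGN(batch_len));
 *	if (IS_ERR(shadow))
 *		return PTR_ERR(shadow);
 *
 *	... copy and parse the user batch into shadow ...
 *
 *	i915_gem_object_unpin_pages(shadow);
 *
 * The object comes back with its pages pinned, hence the final unpin once
 * the copy is complete.
 */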