drivers/gpu/drm/i915/i915_gem_batch_pool.c
/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "i915_drv.h"
#include "i915_gem_batch_pool.h"

/**
 * DOC: batch pool
 *
 * In order to submit batch buffers as 'secure', the software command parser
 * must ensure that a batch buffer cannot be modified after parsing. It does
 * this by copying the user provided batch buffer contents to a kernel owned
 * buffer from which the hardware will actually execute, and by carefully
 * managing the address space bindings for such buffers.
 *
 * The batch pool framework provides a mechanism for the driver to manage a
 * set of scratch buffers to use for this purpose. The framework can be
 * extended to support other use cases should they arise.
 */
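
/*
 * A minimal usage sketch, illustrative only: it assumes the engine's
 * embedded batch_pool and that struct_mutex is already held, and elides
 * the surrounding request construction:
 *
 *	struct drm_i915_gem_object *obj;
 *
 *	obj = i915_gem_batch_pool_get(&engine->batch_pool, size);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 *
 *	... copy and parse the user batch into obj, submit the request ...
 *
 *	i915_gem_object_unpin_pages(obj);
 */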

/**
 * i915_gem_batch_pool_init() - initialize a batch buffer pool
 * @engine: the associated request submission engine
 * @pool: the batch buffer pool
 */
void i915_gem_batch_pool_init(struct intel_engine_cs *engine,
			      struct i915_gem_batch_pool *pool)
{
	int n;

	pool->engine = engine;

	for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
		INIT_LIST_HEAD(&pool->cache_list[n]);
}

/**
 * i915_gem_batch_pool_fini() - clean up a batch buffer pool
 * @pool: the pool to clean up
 *
 * Note: Callers must hold the struct_mutex.
 */
void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool)
{
	int n;

	lockdep_assert_held(&pool->engine->i915->drm.struct_mutex);

	for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) {
		struct drm_i915_gem_object *obj, *next;
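
		/* Drop the pool's reference: the object is freed immediately
		 * unless it is still active on the GPU, in which case the
		 * release is deferred until it idles.
		 */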
		list_for_each_entry_safe(obj, next,
					 &pool->cache_list[n],
					 batch_pool_link)
			__i915_gem_object_release_unless_active(obj);

		INIT_LIST_HEAD(&pool->cache_list[n]);
	}
}

/**
 * i915_gem_batch_pool_get() - allocate a buffer from the pool
 * @pool: the batch buffer pool
 * @size: the minimum desired size of the returned buffer
 *
 * Returns an inactive buffer from @pool with at least @size bytes,
 * with the pages pinned. The caller must i915_gem_object_unpin_pages()
 * on the returned object.
 *
 * Note: Callers must hold the struct_mutex.
 *
 * Return: the buffer object or an error pointer
 */
struct drm_i915_gem_object *
i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
			size_t size)
{
	struct drm_i915_gem_object *obj;
	struct list_head *list;
	int n, ret;

	lockdep_assert_held(&pool->engine->i915->drm.struct_mutex);

	/* Compute a power-of-two bucket, but throw everything greater than
	 * 16KiB into the same bucket: i.e. the buckets hold objects of
	 * (1 page, 2 pages, 4 pages, 8+ pages).
	 */
	n = fls(size >> PAGE_SHIFT) - 1;
	if (n >= ARRAY_SIZE(pool->cache_list))
		n = ARRAY_SIZE(pool->cache_list) - 1;
	list = &pool->cache_list[n];
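	/*
	 * Worked example, assuming 4KiB pages and the four buckets above:
	 * a 1 page request maps to bucket 0, 2-3 pages to bucket 1,
	 * 4-7 pages to bucket 2, and 8+ pages clamp to the last bucket, 3.
	 */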

	list_for_each_entry(obj, list, batch_pool_link) {
		/* The batches are strictly LRU ordered, so if this object is
		 * still busy, every object after it in the list is busier
		 * still and the search can stop here.
		 */
		if (i915_gem_object_is_active(obj)) {
			struct reservation_object *resv = obj->resv;

			if (!reservation_object_test_signaled_rcu(resv, true))
				break;
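
			/* Every fence on this object has already signalled,
			 * so retiring the outstanding requests will mark the
			 * object idle.
			 */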
			i915_gem_retire_requests(pool->engine->i915);
			GEM_BUG_ON(i915_gem_object_is_active(obj));

			/*
			 * The object is now idle, clear the array of shared
			 * fences before we add a new request. Although we
			 * remain on the same engine, we may be on a different
			 * timeline and so may continually grow the array,
			 * trapping a reference to all the old fences, rather
			 * than replace the existing fence.
			 */
			if (rcu_access_pointer(resv->fence)) {
				reservation_object_lock(resv, NULL);
				reservation_object_add_excl_fence(resv, NULL);
				reservation_object_unlock(resv);
			}
		}

		GEM_BUG_ON(!reservation_object_test_signaled_rcu(obj->resv,
								 true));

		if (obj->base.size >= size)
			goto found;
	}

	obj = i915_gem_object_create_internal(pool->engine->i915, size);
	if (IS_ERR(obj))
		return obj;

found:
	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		return ERR_PTR(ret);
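
	/* Move the object to the tail of its bucket, keeping each
	 * bucket in LRU order with the oldest batch at the head.
	 */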
	list_move_tail(&obj->batch_pool_link, list);
	return obj;
}