treewide: remove redundant IS_ERR() before error code check
[linux/fpc-iii.git] / drivers / gpu / drm / i915 / gt / intel_engine_pool.c
blob 3971868183050aee3626d26056e12b33a752d66f
1 /*
2 * SPDX-License-Identifier: MIT
4 * Copyright © 2014-2018 Intel Corporation
5 */
7 #include "gem/i915_gem_object.h"
9 #include "i915_drv.h"
10 #include "intel_engine_pm.h"
11 #include "intel_engine_pool.h"
13 static struct intel_engine_cs *to_engine(struct intel_engine_pool *pool)
15 return container_of(pool, struct intel_engine_cs, pool);
18 static struct list_head *
19 bucket_for_size(struct intel_engine_pool *pool, size_t sz)
21 int n;
24 * Compute a power-of-two bucket, but throw everything greater than
25 * 16KiB into the same bucket: i.e. the buckets hold objects of
26 * (1 page, 2 pages, 4 pages, 8+ pages).
28 n = fls(sz >> PAGE_SHIFT) - 1;
29 if (n >= ARRAY_SIZE(pool->cache_list))
30 n = ARRAY_SIZE(pool->cache_list) - 1;
32 return &pool->cache_list[n];
35 static void node_free(struct intel_engine_pool_node *node)
37 i915_gem_object_put(node->obj);
38 i915_active_fini(&node->active);
39 kfree(node);
42 static int pool_active(struct i915_active *ref)
44 struct intel_engine_pool_node *node =
45 container_of(ref, typeof(*node), active);
46 struct dma_resv *resv = node->obj->base.resv;
47 int err;
49 if (dma_resv_trylock(resv)) {
50 dma_resv_add_excl_fence(resv, NULL);
51 dma_resv_unlock(resv);
54 err = i915_gem_object_pin_pages(node->obj);
55 if (err)
56 return err;
58 /* Hide this pinned object from the shrinker until retired */
59 i915_gem_object_make_unshrinkable(node->obj);
61 return 0;
64 __i915_active_call
65 static void pool_retire(struct i915_active *ref)
67 struct intel_engine_pool_node *node =
68 container_of(ref, typeof(*node), active);
69 struct intel_engine_pool *pool = node->pool;
70 struct list_head *list = bucket_for_size(pool, node->obj->base.size);
71 unsigned long flags;
73 GEM_BUG_ON(!intel_engine_pm_is_awake(to_engine(pool)));
75 i915_gem_object_unpin_pages(node->obj);
77 /* Return this object to the shrinker pool */
78 i915_gem_object_make_purgeable(node->obj);
80 spin_lock_irqsave(&pool->lock, flags);
81 list_add(&node->link, list);
82 spin_unlock_irqrestore(&pool->lock, flags);
85 static struct intel_engine_pool_node *
86 node_create(struct intel_engine_pool *pool, size_t sz)
88 struct intel_engine_cs *engine = to_engine(pool);
89 struct intel_engine_pool_node *node;
90 struct drm_i915_gem_object *obj;
92 node = kmalloc(sizeof(*node),
93 GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
94 if (!node)
95 return ERR_PTR(-ENOMEM);
97 node->pool = pool;
98 i915_active_init(&node->active, pool_active, pool_retire);
100 obj = i915_gem_object_create_internal(engine->i915, sz);
101 if (IS_ERR(obj)) {
102 i915_active_fini(&node->active);
103 kfree(node);
104 return ERR_CAST(obj);
107 i915_gem_object_set_readonly(obj);
109 node->obj = obj;
110 return node;
113 static struct intel_engine_pool *lookup_pool(struct intel_engine_cs *engine)
115 if (intel_engine_is_virtual(engine))
116 engine = intel_virtual_engine_get_sibling(engine, 0);
118 GEM_BUG_ON(!engine);
119 return &engine->pool;
122 struct intel_engine_pool_node *
123 intel_engine_get_pool(struct intel_engine_cs *engine, size_t size)
125 struct intel_engine_pool *pool = lookup_pool(engine);
126 struct intel_engine_pool_node *node;
127 struct list_head *list;
128 unsigned long flags;
129 int ret;
131 GEM_BUG_ON(!intel_engine_pm_is_awake(to_engine(pool)));
133 size = PAGE_ALIGN(size);
134 list = bucket_for_size(pool, size);
136 spin_lock_irqsave(&pool->lock, flags);
137 list_for_each_entry(node, list, link) {
138 if (node->obj->base.size < size)
139 continue;
140 list_del(&node->link);
141 break;
143 spin_unlock_irqrestore(&pool->lock, flags);
145 if (&node->link == list) {
146 node = node_create(pool, size);
147 if (IS_ERR(node))
148 return node;
151 ret = i915_active_acquire(&node->active);
152 if (ret) {
153 node_free(node);
154 return ERR_PTR(ret);
157 return node;
160 void intel_engine_pool_init(struct intel_engine_pool *pool)
162 int n;
164 spin_lock_init(&pool->lock);
165 for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
166 INIT_LIST_HEAD(&pool->cache_list[n]);
169 void intel_engine_pool_park(struct intel_engine_pool *pool)
171 int n;
173 for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) {
174 struct list_head *list = &pool->cache_list[n];
175 struct intel_engine_pool_node *node, *nn;
177 list_for_each_entry_safe(node, nn, list, link)
178 node_free(node);
180 INIT_LIST_HEAD(list);
184 void intel_engine_pool_fini(struct intel_engine_pool *pool)
186 int n;
188 for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
189 GEM_BUG_ON(!list_empty(&pool->cache_list[n]));