/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2018 Intel Corporation
 */
#include "gem/i915_gem_object.h"

#include "i915_drv.h"
#include "intel_engine_pm.h"
#include "intel_engine_pool.h"
static struct intel_engine_cs *to_engine(struct intel_engine_pool *pool)
{
	return container_of(pool, struct intel_engine_cs, pool);
}
static struct list_head *
bucket_for_size(struct intel_engine_pool *pool, size_t sz)
{
	int n;

	/*
	 * Compute a power-of-two bucket, but throw everything greater than
	 * 16KiB into the same bucket: i.e. the buckets hold objects of
	 * (1 page, 2 pages, 4 pages, 8+ pages).
	 */
	n = fls(sz >> PAGE_SHIFT) - 1;
	if (n >= ARRAY_SIZE(pool->cache_list))
		n = ARRAY_SIZE(pool->cache_list) - 1;

	return &pool->cache_list[n];
}
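/* Free a cache node: release its backing object and activity tracker. */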
static void node_free(struct intel_engine_pool_node *node)
{
	i915_gem_object_put(node->obj);
	i915_active_fini(&node->active);
	kfree(node);
}
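/*
 * Called when the node's i915_active is first acquired: drop any stale
 * fences left on the recycled object (only if the reservation can be
 * taken without blocking), pin its backing pages and hide it from the
 * shrinker while it is in use.
 */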
static int pool_active(struct i915_active *ref)
{
	struct intel_engine_pool_node *node =
		container_of(ref, typeof(*node), active);
	struct dma_resv *resv = node->obj->base.resv;
	int err;

	if (dma_resv_trylock(resv)) {
		dma_resv_add_excl_fence(resv, NULL);
		dma_resv_unlock(resv);
	}

	err = i915_gem_object_pin_pages(node->obj);
	if (err)
		return err;

	/* Hide this pinned object from the shrinker until retired */
	i915_gem_object_make_unshrinkable(node->obj);

	return 0;
}
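/*
 * Called when the last reference to the node's i915_active is released:
 * the buffer is idle, so unpin its pages, let the shrinker purge the
 * backing store under memory pressure, and park the node back on its
 * size bucket for reuse.
 */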
static void pool_retire(struct i915_active *ref)
{
	struct intel_engine_pool_node *node =
		container_of(ref, typeof(*node), active);
	struct intel_engine_pool *pool = node->pool;
	struct list_head *list = bucket_for_size(pool, node->obj->base.size);
	unsigned long flags;

	GEM_BUG_ON(!intel_engine_pm_is_awake(to_engine(pool)));

	i915_gem_object_unpin_pages(node->obj);

	/* Return this object to the shrinker pool */
	i915_gem_object_make_purgeable(node->obj);

	spin_lock_irqsave(&pool->lock, flags);
	list_add(&node->link, list);
	spin_unlock_irqrestore(&pool->lock, flags);
}
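/*
 * Allocate a fresh cache node backed by an internal GEM object of the
 * requested size, ready to be activated via pool_active()/pool_retire().
 */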
static struct intel_engine_pool_node *
node_create(struct intel_engine_pool *pool, size_t sz)
{
	struct intel_engine_cs *engine = to_engine(pool);
	struct intel_engine_pool_node *node;
	struct drm_i915_gem_object *obj;

	node = kmalloc(sizeof(*node),
		       GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
	if (!node)
		return ERR_PTR(-ENOMEM);

	node->pool = pool;
	i915_active_init(&node->active, pool_active, pool_retire);

	obj = i915_gem_object_create_internal(engine->i915, sz);
	if (IS_ERR(obj)) {
		i915_active_fini(&node->active);
		kfree(node);
		return ERR_CAST(obj);
	}

	i915_gem_object_set_readonly(obj);

	node->obj = obj;
	return node;
}
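/*
 * Virtual engines have no pool of their own; they borrow the pool of
 * their first physical sibling.
 */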
static struct intel_engine_pool *lookup_pool(struct intel_engine_cs *engine)
{
	if (intel_engine_is_virtual(engine))
		engine = intel_virtual_engine_get_sibling(engine, 0);

	GEM_BUG_ON(!engine);
	return &engine->pool;
}
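/*
 * Get a pooled buffer of at least @size bytes: reuse the first buffer in
 * the matching size bucket that is large enough, or create a new one if
 * the bucket is empty. The node is returned with its i915_active
 * acquired; once the last reference is dropped, pool_retire() returns it
 * to the bucket.
 */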
struct intel_engine_pool_node *
intel_engine_get_pool(struct intel_engine_cs *engine, size_t size)
{
	struct intel_engine_pool *pool = lookup_pool(engine);
	struct intel_engine_pool_node *node;
	struct list_head *list;
	unsigned long flags;
	int ret;

	GEM_BUG_ON(!intel_engine_pm_is_awake(to_engine(pool)));

	size = PAGE_ALIGN(size);
	list = bucket_for_size(pool, size);

	spin_lock_irqsave(&pool->lock, flags);
	list_for_each_entry(node, list, link) {
		if (node->obj->base.size < size)
			continue;

		list_del(&node->link);
		break;
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	if (&node->link == list) {
		node = node_create(pool, size);
		if (IS_ERR(node))
			return node;
	}

	ret = i915_active_acquire(&node->active);
	if (ret) {
		node_free(node);
		return ERR_PTR(ret);
	}

	return node;
}
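/* Prepare the per-engine pool: one empty bucket per object size class. */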
void intel_engine_pool_init(struct intel_engine_pool *pool)
{
	int n;

	spin_lock_init(&pool->lock);
	for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
		INIT_LIST_HEAD(&pool->cache_list[n]);
}
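/*
 * Called when the engine is parked: discard all cached buffers so that an
 * idle engine does not hold on to unused memory.
 */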
void intel_engine_pool_park(struct intel_engine_pool *pool)
{
	int n;

	for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) {
		struct list_head *list = &pool->cache_list[n];
		struct intel_engine_pool_node *node, *nn;

		list_for_each_entry_safe(node, nn, list, link)
			node_free(node);

		INIT_LIST_HEAD(list);
	}
}
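/* By this point the engine is parked, so all buckets must be empty. */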
void intel_engine_pool_fini(struct intel_engine_pool *pool)
{
	int n;

	for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
		GEM_BUG_ON(!list_empty(&pool->cache_list[n]));
}