// SPDX-License-Identifier: MIT
/*
 * Copyright © 2014-2018 Intel Corporation
 */

#include "gem/i915_gem_object.h"

#include "i915_drv.h"
#include "intel_engine_pm.h"
#include "intel_gt_buffer_pool.h"

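/*
 * The pool is embedded within its owning struct intel_gt, so we can
 * recover the GT from a pool pointer with container_of().
 */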
static struct intel_gt *to_gt(struct intel_gt_buffer_pool *pool)
{
	return container_of(pool, struct intel_gt, buffer_pool);
}

static struct list_head *
bucket_for_size(struct intel_gt_buffer_pool *pool, size_t sz)
{
	int n;

	/*
	 * Compute a power-of-two bucket, but throw everything greater than
	 * 16KiB into the same bucket: i.e. the buckets hold objects of
	 * (1 page, 2 pages, 4 pages, 8+ pages).
	 */
	n = fls(sz >> PAGE_SHIFT) - 1;
	if (n >= ARRAY_SIZE(pool->cache_list))
		n = ARRAY_SIZE(pool->cache_list) - 1;

	return &pool->cache_list[n];
}

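/*
 * For example, with 4KiB pages: a 2-page request gives fls(2) - 1 = 1
 * (bucket 1), a 3-page request also lands in bucket 1, and a 32-page
 * request is clamped into the final 8+ pages bucket. Since a bucket can
 * mix sizes, lookups must still compare the cached object's size.
 */
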
static void node_free(struct intel_gt_buffer_pool_node *node)
{
	i915_gem_object_put(node->obj);
	i915_active_fini(&node->active);
	kfree_rcu(node, rcu);
}

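/*
 * Scan each bucket from its tail (oldest entry) and reap any node that
 * has been idle for longer than @keep jiffies. A node is claimed by
 * zeroing node->age with xchg() so that a concurrent lookup cannot also
 * take it; the claimed run is then unlinked under the lock and freed
 * outside it. Returns true if any bucket still holds cached buffers.
 */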
static bool pool_free_older_than(struct intel_gt_buffer_pool *pool, long keep)
{
	struct intel_gt_buffer_pool_node *node, *stale = NULL;
	bool active = false;
	int n;

	/* Free buffers that have not been used in the past second */
	for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) {
		struct list_head *list = &pool->cache_list[n];

		if (list_empty(list))
			continue;

		if (spin_trylock_irq(&pool->lock)) {
			struct list_head *pos;

			/* Most recent at head; oldest at tail */
			list_for_each_prev(pos, list) {
				unsigned long age;

				node = list_entry(pos, typeof(*node), link);

				age = READ_ONCE(node->age);
				if (!age || jiffies - age < keep)
					break;

				/* Check we are the first to claim this node */
				if (!xchg(&node->age, 0))
					break;

				node->free = stale;
				stale = node;
			}
			if (!list_is_last(pos, list))
				__list_del_many(pos, list);

			spin_unlock_irq(&pool->lock);
		}

		active |= !list_empty(list);
	}

	while ((node = stale)) {
		stale = stale->free;
		node_free(node);
	}

	return active;
}

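/*
 * Delayed worker that periodically reaps idle buffers, rearming itself
 * for another pass while any cached buffers remain.
 */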
static void pool_free_work(struct work_struct *wrk)
{
	struct intel_gt_buffer_pool *pool =
		container_of(wrk, typeof(*pool), work.work);

	if (pool_free_older_than(pool, HZ))
		schedule_delayed_work(&pool->work,
				      round_jiffies_up_relative(HZ));
}

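/*
 * First use of a node after creation or reuse: discard any stale fences
 * left on the object (if the reservation can be taken without blocking),
 * then pin its backing pages for the duration of the activity.
 */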
static int pool_active(struct i915_active *ref)
{
	struct intel_gt_buffer_pool_node *node =
		container_of(ref, typeof(*node), active);
	struct dma_resv *resv = node->obj->base.resv;
	int err;

	if (dma_resv_trylock(resv)) {
		dma_resv_add_excl_fence(resv, NULL);
		dma_resv_unlock(resv);
	}

	err = i915_gem_object_pin_pages(node->obj);
	if (err)
		return err;

	/* Hide this pinned object from the shrinker until retired */
	i915_gem_object_make_unshrinkable(node->obj);

	return 0;
}

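/*
 * Called once the last request using this node is retired: unpin the
 * pages, allow the shrinker to discard them under memory pressure, and
 * return the node to its size bucket with a fresh age stamp so the
 * reaper knows when it may be freed.
 */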
__i915_active_call
static void pool_retire(struct i915_active *ref)
{
	struct intel_gt_buffer_pool_node *node =
		container_of(ref, typeof(*node), active);
	struct intel_gt_buffer_pool *pool = node->pool;
	struct list_head *list = bucket_for_size(pool, node->obj->base.size);
	unsigned long flags;

	i915_gem_object_unpin_pages(node->obj);

	/* Return this object to the shrinker pool */
	i915_gem_object_make_purgeable(node->obj);

	GEM_BUG_ON(node->age);
	spin_lock_irqsave(&pool->lock, flags);
	list_add_rcu(&node->link, list);
	WRITE_ONCE(node->age, jiffies ?: 1); /* 0 reserved for active nodes */
	spin_unlock_irqrestore(&pool->lock, flags);

	schedule_delayed_work(&pool->work,
			      round_jiffies_up_relative(HZ));
}

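/*
 * Allocate a fresh node backed by an internal object of the requested
 * size, marking the object read-only before exposing it to users.
 */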
static struct intel_gt_buffer_pool_node *
node_create(struct intel_gt_buffer_pool *pool, size_t sz)
{
	struct intel_gt *gt = to_gt(pool);
	struct intel_gt_buffer_pool_node *node;
	struct drm_i915_gem_object *obj;

	node = kmalloc(sizeof(*node),
		       GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
	if (!node)
		return ERR_PTR(-ENOMEM);

	node->age = 0;
	node->pool = pool;
	i915_active_init(&node->active, pool_active, pool_retire);

	obj = i915_gem_object_create_internal(gt->i915, sz);
	if (IS_ERR(obj)) {
		i915_active_fini(&node->active);
		kfree(node);
		return ERR_CAST(obj);
	}

	i915_gem_object_set_readonly(obj);

	node->obj = obj;
	return node;
}

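/*
 * Fetch a suitably sized buffer from the pool, or create a new one if no
 * idle cached buffer is large enough. The lookup walks the bucket under
 * RCU and claims a node locklessly via cmpxchg() on node->age; the pool
 * spinlock is only taken to unlink the claimed node.
 */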
struct intel_gt_buffer_pool_node *
intel_gt_get_buffer_pool(struct intel_gt *gt, size_t size)
{
	struct intel_gt_buffer_pool *pool = &gt->buffer_pool;
	struct intel_gt_buffer_pool_node *node;
	struct list_head *list;
	int ret;

	size = PAGE_ALIGN(size);
	list = bucket_for_size(pool, size);

	rcu_read_lock();
	list_for_each_entry_rcu(node, list, link) {
		unsigned long age;

		if (node->obj->base.size < size)
			continue;

		age = READ_ONCE(node->age);
		if (!age)
			continue;

		if (cmpxchg(&node->age, age, 0) == age) {
			spin_lock_irq(&pool->lock);
			list_del_rcu(&node->link);
			spin_unlock_irq(&pool->lock);
			break;
		}
	}
	rcu_read_unlock();

	if (&node->link == list) {
		node = node_create(pool, size);
		if (IS_ERR(node))
			return node;
	}

	ret = i915_active_acquire(&node->active);
	if (ret) {
		node_free(node);
		return ERR_PTR(ret);
	}

	return node;
}

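/*
 * A typical caller might look like this (a sketch only: here
 * intel_gt_buffer_pool_put() is assumed to be the header's thin wrapper
 * that releases node->active, triggering pool_retire() once idle):
 *
 *	node = intel_gt_get_buffer_pool(gt, size);
 *	if (IS_ERR(node))
 *		return PTR_ERR(node);
 *
 *	... use node->obj as a transient batch/scratch buffer ...
 *
 *	intel_gt_buffer_pool_put(node); -- returned to its bucket on retire
 */
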
void intel_gt_init_buffer_pool(struct intel_gt *gt)
{
	struct intel_gt_buffer_pool *pool = &gt->buffer_pool;
	int n;

	spin_lock_init(&pool->lock);
	for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
		INIT_LIST_HEAD(&pool->cache_list[n]);
	INIT_DELAYED_WORK(&pool->work, pool_free_work);
}

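/*
 * Drain the pool: free every cached buffer (keep == 0) and cancel the
 * reaper. If cancel_delayed_work_sync() reports the work was still
 * queued, a concurrent retirement may have re-armed it after returning a
 * node, so loop until the pool stays empty.
 */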
void intel_gt_flush_buffer_pool(struct intel_gt *gt)
{
	struct intel_gt_buffer_pool *pool = &gt->buffer_pool;

	do {
		while (pool_free_older_than(pool, 0))
			;
	} while (cancel_delayed_work_sync(&pool->work));
}

void intel_gt_fini_buffer_pool(struct intel_gt *gt)
{
	struct intel_gt_buffer_pool *pool = &gt->buffer_pool;
	int n;

	intel_gt_flush_buffer_pool(gt);

	for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
		GEM_BUG_ON(!list_empty(&pool->cache_list[n]));
}