/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include <linux/debugobjects.h>

#include "gt/intel_context.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_ring.h"

#include "i915_active.h"
#include "i915_globals.h"

/*
 * Active refs memory management
 *
 * To be more economical with memory, we reap all the i915_active trees as
 * they idle (when we know the active requests are inactive) and allocate the
 * nodes from a local slab cache to hopefully reduce the fragmentation.
 */

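/*
 * Rough usage sketch (the caller names below are illustrative, not taken
 * from this file): an owner embeds an i915_active, initialises it with
 * optional activate/retire callbacks, tracks fences against timelines
 * while holding an acquire reference, and may wait for all tracked fences
 * to signal before tearing the tracker down.
 *
 *	i915_active_init(&obj->active, my_active, my_retire);
 *	err = i915_active_ref(&obj->active, tl, &rq->fence);
 *	...
 *	err = i915_active_wait(&obj->active);
 *	i915_active_fini(&obj->active);
 */
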
static struct i915_global_active {
	struct i915_global base;
	struct kmem_cache *slab_cache;
} global;

struct active_node {
	struct i915_active_fence base;
	struct i915_active *ref;
	struct rb_node node;
	u64 timeline;
};

static inline struct active_node *
node_from_active(struct i915_active_fence *active)
{
	return container_of(active, struct active_node, base);
}

#define take_preallocated_barriers(x) llist_del_all(&(x)->preallocated_barriers)

static inline bool is_barrier(const struct i915_active_fence *active)
{
	return IS_ERR(rcu_access_pointer(active->fence));
}

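/*
 * An idle barrier is a proto-node that has no fence attached yet:
 * active->fence holds ERR_PTR(-EAGAIN) instead of a real dma_fence (see
 * i915_active_acquire_preallocate_barrier()). While in that state the
 * embedded dma_fence_cb is not on any callback list, so its list node can
 * be reused as an llist_node for engine->barrier_tasks, and its ->prev
 * pointer doubles as storage for the owning engine.
 */
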
static inline struct llist_node *barrier_to_ll(struct active_node *node)
{
	GEM_BUG_ON(!is_barrier(&node->base));
	return (struct llist_node *)&node->base.cb.node;
}

static inline struct intel_engine_cs *
__barrier_to_engine(struct active_node *node)
{
	return (struct intel_engine_cs *)READ_ONCE(node->base.cb.node.prev);
}

static inline struct intel_engine_cs *
barrier_to_engine(struct active_node *node)
{
	GEM_BUG_ON(!is_barrier(&node->base));
	return __barrier_to_engine(node);
}

static inline struct active_node *barrier_from_ll(struct llist_node *x)
{
	return container_of((struct list_head *)x,
			    struct active_node, base.cb.node);
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) && IS_ENABLED(CONFIG_DEBUG_OBJECTS)

static void *active_debug_hint(void *addr)
{
	struct i915_active *ref = addr;

	return (void *)ref->active ?: (void *)ref->retire ?: (void *)ref;
}

static struct debug_obj_descr active_debug_desc = {
	.name = "i915_active",
	.debug_hint = active_debug_hint,
};

static void debug_active_init(struct i915_active *ref)
{
	debug_object_init(ref, &active_debug_desc);
}

static void debug_active_activate(struct i915_active *ref)
{
	lockdep_assert_held(&ref->tree_lock);
	if (!atomic_read(&ref->count)) /* before the first inc */
		debug_object_activate(ref, &active_debug_desc);
}

static void debug_active_deactivate(struct i915_active *ref)
{
	lockdep_assert_held(&ref->tree_lock);
	if (!atomic_read(&ref->count)) /* after the last dec */
		debug_object_deactivate(ref, &active_debug_desc);
}

static void debug_active_fini(struct i915_active *ref)
{
	debug_object_free(ref, &active_debug_desc);
}

static void debug_active_assert(struct i915_active *ref)
{
	debug_object_assert_init(ref, &active_debug_desc);
}

#else

static inline void debug_active_init(struct i915_active *ref) { }
static inline void debug_active_activate(struct i915_active *ref) { }
static inline void debug_active_deactivate(struct i915_active *ref) { }
static inline void debug_active_fini(struct i915_active *ref) { }
static inline void debug_active_assert(struct i915_active *ref) { }

#endif

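/*
 * __active_retire() runs once the last reference has been dropped: it
 * detaches the rbtree of per-timeline nodes under the tree_lock, invokes
 * the owner's retire callback, wakes any i915_active_wait() sleepers and
 * returns the nodes to the slab cache.
 */
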
static void
__active_retire(struct i915_active *ref)
{
	struct active_node *it, *n;
	struct rb_root root;
	unsigned long flags;

	GEM_BUG_ON(i915_active_is_idle(ref));

	/* return the unused nodes to our slabcache -- flushing the allocator */
	if (!atomic_dec_and_lock_irqsave(&ref->count, &ref->tree_lock, flags))
		return;

	GEM_BUG_ON(rcu_access_pointer(ref->excl.fence));
	debug_active_deactivate(ref);

	root = ref->tree;
	ref->tree = RB_ROOT;
	ref->cache = NULL;

	spin_unlock_irqrestore(&ref->tree_lock, flags);

	/* After the final retire, the entire struct may be freed */
	if (ref->retire)
		ref->retire(ref);

	/* ... except if you wait on it, you must manage your own references! */
	wake_up_var(ref);

	rbtree_postorder_for_each_entry_safe(it, n, &root, node) {
		GEM_BUG_ON(i915_active_fence_isset(&it->base));
		kmem_cache_free(global.slab_cache, it);
	}
}

static void
active_work(struct work_struct *wrk)
{
	struct i915_active *ref = container_of(wrk, typeof(*ref), work);

	GEM_BUG_ON(!atomic_read(&ref->count));
	if (atomic_add_unless(&ref->count, -1, 1))
		return;

	__active_retire(ref);
}

static void
active_retire(struct i915_active *ref)
{
	GEM_BUG_ON(!atomic_read(&ref->count));
	if (atomic_add_unless(&ref->count, -1, 1))
		return;

	if (ref->flags & I915_ACTIVE_RETIRE_SLEEPS) {
		queue_work(system_unbound_wq, &ref->work);
		return;
	}

	__active_retire(ref);
}

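/*
 * Strip the __rcu annotation from the fence slot so that it can be used
 * directly with cmpxchg()/xchg() when claiming or replacing the fence.
 */
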
static inline struct dma_fence **
__active_fence_slot(struct i915_active_fence *active)
{
	return (struct dma_fence ** __force)&active->fence;
}

static inline bool
active_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct i915_active_fence *active =
		container_of(cb, typeof(*active), cb);

	return cmpxchg(__active_fence_slot(active), fence, NULL) == fence;
}

static void
node_retire(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	if (active_fence_cb(fence, cb))
		active_retire(container_of(cb, struct active_node, base.cb)->ref);
}

static void
excl_retire(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	if (active_fence_cb(fence, cb))
		active_retire(container_of(cb, struct i915_active, excl.cb));
}

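/*
 * Look up (or allocate) the i915_active_fence slot tracking the given
 * timeline. The most recently used node is cached in ref->cache so that
 * the rbtree walk is skipped in the common case.
 */
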
static struct i915_active_fence *
active_instance(struct i915_active *ref, struct intel_timeline *tl)
{
	struct active_node *node, *prealloc;
	struct rb_node **p, *parent;
	u64 idx = tl->fence_context;

	/*
	 * We track the most recently used timeline to skip a rbtree search
	 * for the common case, under typical loads we never need the rbtree
	 * at all. We can reuse the last slot if it is empty, that is
	 * after the previous activity has been retired, or if it matches the
	 * current timeline.
	 */
	node = READ_ONCE(ref->cache);
	if (node && node->timeline == idx)
		return &node->base;

	/* Preallocate a replacement, just in case */
	prealloc = kmem_cache_alloc(global.slab_cache, GFP_KERNEL);
	if (!prealloc)
		return NULL;

	spin_lock_irq(&ref->tree_lock);
	GEM_BUG_ON(i915_active_is_idle(ref));

	parent = NULL;
	p = &ref->tree.rb_node;
	while (*p) {
		parent = *p;

		node = rb_entry(parent, struct active_node, node);
		if (node->timeline == idx) {
			kmem_cache_free(global.slab_cache, prealloc);
			goto out;
		}

		if (node->timeline < idx)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}

	node = prealloc;
	__i915_active_fence_init(&node->base, NULL, node_retire);
	node->ref = ref;
	node->timeline = idx;

	rb_link_node(&node->node, parent, p);
	rb_insert_color(&node->node, &ref->tree);

out:
	ref->cache = node;
	spin_unlock_irq(&ref->tree_lock);

	BUILD_BUG_ON(offsetof(typeof(*node), base));
	return &node->base;
}

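/*
 * Callers normally reach this through the i915_active_init() wrapper,
 * which supplies the lockdep class keys. The low bits of the retire
 * pointer encode whether the retire callback may sleep; if so, retirement
 * is deferred to a workqueue (see active_retire()).
 */
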
void __i915_active_init(struct i915_active *ref,
			int (*active)(struct i915_active *ref),
			void (*retire)(struct i915_active *ref),
			struct lock_class_key *mkey,
			struct lock_class_key *wkey)
{
	unsigned long bits;

	debug_active_init(ref);

	ref->flags = 0;
	ref->active = active;
	ref->retire = ptr_unpack_bits(retire, &bits, 2);
	if (bits & I915_ACTIVE_MAY_SLEEP)
		ref->flags |= I915_ACTIVE_RETIRE_SLEEPS;

	spin_lock_init(&ref->tree_lock);
	ref->tree = RB_ROOT;
	ref->cache = NULL;

	init_llist_head(&ref->preallocated_barriers);
	atomic_set(&ref->count, 0);
	__mutex_init(&ref->mutex, "i915_active", mkey);
	__i915_active_fence_init(&ref->excl, NULL, excl_retire);
	INIT_WORK(&ref->work, active_work);
#if IS_ENABLED(CONFIG_LOCKDEP)
	lockdep_init_map(&ref->work.lockdep_map, "i915_active.work", wkey, 0);
#endif
}

static bool ____active_del_barrier(struct i915_active *ref,
				   struct active_node *node,
				   struct intel_engine_cs *engine)
{
	struct llist_node *head = NULL, *tail = NULL;
	struct llist_node *pos, *next;

	GEM_BUG_ON(node->timeline != engine->kernel_context->timeline->fence_context);

	/*
	 * Rebuild the llist excluding our node. We may perform this
	 * outside of the kernel_context timeline mutex and so someone
	 * else may be manipulating the engine->barrier_tasks, in
	 * which case either we or they will be upset :)
	 *
	 * A second __active_del_barrier() will report failure to claim
	 * the active_node and the caller will just shrug and know not to
	 * claim ownership of its node.
	 *
	 * A concurrent i915_request_add_active_barriers() will miss adding
	 * any of the tasks, but we will try again on the next -- and since
	 * we are actively using the barrier, we know that there will be
	 * at least another opportunity when we idle.
	 */
	llist_for_each_safe(pos, next, llist_del_all(&engine->barrier_tasks)) {
		if (node == barrier_from_ll(pos)) {
			node = NULL;
			continue;
		}

		pos->next = head;
		head = pos;
		if (!tail)
			tail = pos;
	}
	if (head)
		llist_add_batch(head, tail, &engine->barrier_tasks);

	return !node;
}

static bool
__active_del_barrier(struct i915_active *ref, struct active_node *node)
{
	return ____active_del_barrier(ref, node, barrier_to_engine(node));
}

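/*
 * Track a fence on the given timeline. If the timeline's slot currently
 * holds an idle barrier proto-node, the barrier is stolen back from the
 * engine's barrier_tasks list and the slot is reused for the new fence.
 */
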
int i915_active_ref(struct i915_active *ref,
		    struct intel_timeline *tl,
		    struct dma_fence *fence)
{
	struct i915_active_fence *active;
	int err;

	lockdep_assert_held(&tl->mutex);

	/* Prevent reaping in case we malloc/wait while building the tree */
	err = i915_active_acquire(ref);
	if (err)
		return err;

	active = active_instance(ref, tl);
	if (!active) {
		err = -ENOMEM;
		goto out;
	}

	if (is_barrier(active)) { /* proto-node used by our idle barrier */
		/*
		 * This request is on the kernel_context timeline, and so
		 * we can use it to substitute for the pending idle-barrier
		 * request that we want to emit on the kernel_context.
		 */
		__active_del_barrier(ref, node_from_active(active));
		RCU_INIT_POINTER(active->fence, NULL);
		atomic_dec(&ref->count);
	}

	if (!__i915_active_fence_set(active, fence))
		atomic_inc(&ref->count);

out:
	i915_active_release(ref);
	return err;
}

void i915_active_set_exclusive(struct i915_active *ref, struct dma_fence *f)
{
	/* We expect the caller to manage the exclusive timeline ordering */
	GEM_BUG_ON(i915_active_is_idle(ref));

	if (!__i915_active_fence_set(&ref->excl, f))
		atomic_inc(&ref->count);
}

bool i915_active_acquire_if_busy(struct i915_active *ref)
{
	debug_active_assert(ref);
	return atomic_add_unless(&ref->count, 1, 0);
}

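/*
 * The first acquire (count 0 -> 1) is serialised by ref->mutex so that the
 * optional ref->active() callback runs exactly once before the tracker is
 * marked active.
 */
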
int i915_active_acquire(struct i915_active *ref)
{
	int err;

	if (i915_active_acquire_if_busy(ref))
		return 0;

	err = mutex_lock_interruptible(&ref->mutex);
	if (err)
		return err;

	if (!atomic_read(&ref->count) && ref->active)
		err = ref->active(ref);
	if (!err) {
		spin_lock_irq(&ref->tree_lock); /* vs __active_retire() */
		debug_active_activate(ref);
		atomic_inc(&ref->count);
		spin_unlock_irq(&ref->tree_lock);
	}

	mutex_unlock(&ref->mutex);

	return err;
}

void i915_active_release(struct i915_active *ref)
{
	debug_active_assert(ref);
	active_retire(ref);
}

static void enable_signaling(struct i915_active_fence *active)
{
	struct dma_fence *fence;

	fence = i915_active_fence_get(active);
	if (!fence)
		return;

	dma_fence_enable_sw_signaling(fence);
	dma_fence_put(fence);
}

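/*
 * Flush lazy signaling on all currently tracked fences and then sleep
 * until the last reference is retired. Fences added after the wait has
 * begun are not auto-signaled and so are not waited upon.
 */
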
int i915_active_wait(struct i915_active *ref)
{
	struct active_node *it, *n;

	if (!i915_active_acquire_if_busy(ref))
		return 0;

	/* Flush lazy signals */
	enable_signaling(&ref->excl);
	rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
		if (is_barrier(&it->base)) /* unconnected idle barrier */
			continue;

		enable_signaling(&it->base);
	}
	/* Any fence added after the wait begins will not be auto-signaled */

	i915_active_release(ref);

	if (wait_var_event_interruptible(ref, i915_active_is_idle(ref)))
		return -EINTR;

	flush_work(&ref->work);
	return 0;
}

int i915_request_await_active(struct i915_request *rq, struct i915_active *ref)
{
	int err = 0;

	if (rcu_access_pointer(ref->excl.fence)) {
		struct dma_fence *fence;

		rcu_read_lock();
		fence = dma_fence_get_rcu_safe(&ref->excl.fence);
		rcu_read_unlock();
		if (fence) {
			err = i915_request_await_dma_fence(rq, fence);
			dma_fence_put(fence);
		}
	}

	/* In the future we may choose to await on all fences */

	return err;
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
void i915_active_fini(struct i915_active *ref)
{
	debug_active_fini(ref);
	GEM_BUG_ON(atomic_read(&ref->count));
	GEM_BUG_ON(work_pending(&ref->work));
	GEM_BUG_ON(!RB_EMPTY_ROOT(&ref->tree));
	mutex_destroy(&ref->mutex);
}
#endif

static inline bool is_idle_barrier(struct active_node *node, u64 idx)
{
	return node->timeline == idx && !i915_active_fence_isset(&node->base);
}

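/*
 * Search the rbtree for a barrier node on the kernel_context timeline (idx)
 * that can be reused for a new preallocation, preferring completely idle
 * barriers. A matching node is erased from the tree before being returned.
 */
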
static struct active_node *reuse_idle_barrier(struct i915_active *ref, u64 idx)
{
	struct rb_node *prev, *p;

	if (RB_EMPTY_ROOT(&ref->tree))
		return NULL;

	spin_lock_irq(&ref->tree_lock);
	GEM_BUG_ON(i915_active_is_idle(ref));

	/*
	 * Try to reuse any existing barrier nodes already allocated for this
	 * i915_active, due to overlapping active phases there is likely a
	 * node kept alive (as we reuse before parking). We prefer to reuse
	 * completely idle barriers (less hassle in manipulating the llists),
	 * but otherwise any will do.
	 */
	if (ref->cache && is_idle_barrier(ref->cache, idx)) {
		p = &ref->cache->node;
		goto match;
	}

	prev = NULL;
	p = ref->tree.rb_node;
	while (p) {
		struct active_node *node =
			rb_entry(p, struct active_node, node);

		if (is_idle_barrier(node, idx))
			goto match;

		prev = p;
		if (node->timeline < idx)
			p = p->rb_right;
		else
			p = p->rb_left;
	}

	/*
	 * No quick match, but we did find the leftmost rb_node for the
	 * kernel_context. Walk the rb_tree in-order to see if there were
	 * any idle-barriers on this timeline that we missed, or just use
	 * the first pending barrier.
	 */
	for (p = prev; p; p = rb_next(p)) {
		struct active_node *node =
			rb_entry(p, struct active_node, node);
		struct intel_engine_cs *engine;

		if (node->timeline > idx)
			break;

		if (node->timeline < idx)
			continue;

		if (is_idle_barrier(node, idx))
			goto match;

		/*
		 * The list of pending barriers is protected by the
		 * kernel_context timeline, which notably we do not hold
		 * here. i915_request_add_active_barriers() may consume
		 * the barrier before we claim it, so we have to check
		 * for success.
		 */
		engine = __barrier_to_engine(node);
		smp_rmb(); /* serialise with add_active_barriers */
		if (is_barrier(&node->base) &&
		    ____active_del_barrier(ref, node, engine))
			goto match;
	}

	spin_unlock_irq(&ref->tree_lock);

	return NULL;

match:
	rb_erase(p, &ref->tree); /* Hide from waits and sibling allocations */
	if (p == &ref->cache->node)
		ref->cache = NULL;
	spin_unlock_irq(&ref->tree_lock);

	return rb_entry(p, struct active_node, node);
}

int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
					    struct intel_engine_cs *engine)
{
	intel_engine_mask_t tmp, mask = engine->mask;
	struct llist_node *pos = NULL, *next;
	struct intel_gt *gt = engine->gt;
	int err;

	GEM_BUG_ON(i915_active_is_idle(ref));

	/* Wait until the previous preallocation is completed */
	while (!llist_empty(&ref->preallocated_barriers))
		cond_resched();

	/*
	 * Preallocate a node for each physical engine supporting the target
	 * engine (remember virtual engines have more than one sibling).
	 * We can then use the preallocated nodes in
	 * i915_active_acquire_barrier()
	 */
	for_each_engine_masked(engine, gt, mask, tmp) {
		u64 idx = engine->kernel_context->timeline->fence_context;
		struct active_node *node;

		node = reuse_idle_barrier(ref, idx);
		if (!node) {
			node = kmem_cache_alloc(global.slab_cache, GFP_KERNEL);
			if (!node) {
				err = -ENOMEM;
				goto unwind;
			}

			RCU_INIT_POINTER(node->base.fence, NULL);
			node->base.cb.func = node_retire;
			node->timeline = idx;
			node->ref = ref;
		}

		if (!i915_active_fence_isset(&node->base)) {
			/*
			 * Mark this as being *our* unconnected proto-node.
			 *
			 * Since this node is not in any list, and we have
			 * decoupled it from the rbtree, we can reuse the
			 * request to indicate this is an idle-barrier node
			 * and then we can use the rb_node and list pointers
			 * for our tracking of the pending barrier.
			 */
			RCU_INIT_POINTER(node->base.fence, ERR_PTR(-EAGAIN));
			node->base.cb.node.prev = (void *)engine;
			atomic_inc(&ref->count);
		}
		GEM_BUG_ON(rcu_access_pointer(node->base.fence) != ERR_PTR(-EAGAIN));

		GEM_BUG_ON(barrier_to_engine(node) != engine);
		next = barrier_to_ll(node);
		next->next = pos;
		if (!pos)
			pos = next;
		intel_engine_pm_get(engine);
	}

	GEM_BUG_ON(!llist_empty(&ref->preallocated_barriers));
	llist_add_batch(next, pos, &ref->preallocated_barriers);

	return 0;

unwind:
	while (pos) {
		struct active_node *node = barrier_from_ll(pos);

		pos = pos->next;

		atomic_dec(&ref->count);
		intel_engine_pm_put(barrier_to_engine(node));

		kmem_cache_free(global.slab_cache, node);
	}
	return err;
}

void i915_active_acquire_barrier(struct i915_active *ref)
{
	struct llist_node *pos, *next;
	unsigned long flags;

	GEM_BUG_ON(i915_active_is_idle(ref));

	/*
	 * Transfer the list of preallocated barriers into the
	 * i915_active rbtree, but only as proto-nodes. They will be
	 * populated by i915_request_add_active_barriers() to point to the
	 * request that will eventually release them.
	 */
	llist_for_each_safe(pos, next, take_preallocated_barriers(ref)) {
		struct active_node *node = barrier_from_ll(pos);
		struct intel_engine_cs *engine = barrier_to_engine(node);
		struct rb_node **p, *parent;

		spin_lock_irqsave_nested(&ref->tree_lock, flags,
					 SINGLE_DEPTH_NESTING);
		parent = NULL;
		p = &ref->tree.rb_node;
		while (*p) {
			struct active_node *it;

			parent = *p;

			it = rb_entry(parent, struct active_node, node);
			if (it->timeline < node->timeline)
				p = &parent->rb_right;
			else
				p = &parent->rb_left;
		}
		rb_link_node(&node->node, parent, p);
		rb_insert_color(&node->node, &ref->tree);
		spin_unlock_irqrestore(&ref->tree_lock, flags);

		GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
		llist_add(barrier_to_ll(node), &engine->barrier_tasks);
		intel_engine_pm_put(engine);
	}
}

static struct dma_fence **ll_to_fence_slot(struct llist_node *node)
{
	return __active_fence_slot(&barrier_from_ll(node)->base);
}

void i915_request_add_active_barriers(struct i915_request *rq)
{
	struct intel_engine_cs *engine = rq->engine;
	struct llist_node *node, *next;
	unsigned long flags;

	GEM_BUG_ON(!intel_context_is_barrier(rq->context));
	GEM_BUG_ON(intel_engine_is_virtual(engine));
	GEM_BUG_ON(i915_request_timeline(rq) != engine->kernel_context->timeline);

	node = llist_del_all(&engine->barrier_tasks);
	if (!node)
		return;
	/*
	 * Attach the list of proto-fences to the in-flight request such
	 * that the parent i915_active will be released when this request
	 * is retired.
	 */
	spin_lock_irqsave(&rq->lock, flags);
	llist_for_each_safe(node, next, node) {
		/* serialise with reuse_idle_barrier */
		smp_store_mb(*ll_to_fence_slot(node), &rq->fence);
		list_add_tail((struct list_head *)node, &rq->fence.cb_list);
	}
	spin_unlock_irqrestore(&rq->lock, flags);
}

/*
 * __i915_active_fence_set: Update the last active fence along its timeline
 * @active: the active tracker
 * @fence: the new fence (under construction)
 *
 * Records the new @fence as the last active fence along its timeline in
 * this active tracker, moving the tracking callbacks from the previous
 * fence onto this one. Returns the previous fence (if not already completed),
 * which the caller must ensure is executed before the new fence. To ensure
 * that the order of fences within the timeline of the i915_active_fence is
 * understood, it should be locked by the caller.
 */
struct dma_fence *
__i915_active_fence_set(struct i915_active_fence *active,
			struct dma_fence *fence)
{
	struct dma_fence *prev;
	unsigned long flags;

	if (fence == rcu_access_pointer(active->fence))
		return fence;

	GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags));

	/*
	 * Consider that we have two threads arriving (A and B), with
	 * C already resident as the active->fence.
	 *
	 * A does the xchg first, and so it sees C or NULL depending
	 * on the timing of the interrupt handler. If it is NULL, the
	 * previous fence must have been signaled and we know that
	 * we are first on the timeline. If it is still present,
	 * we acquire the lock on that fence and serialise with the interrupt
	 * handler, in the process removing it from any future interrupt
	 * callback. A will then wait on C before executing (if present).
	 *
	 * As B is second, it sees A as the previous fence and so waits for
	 * it to complete its transition and takes over the occupancy for
	 * itself -- remembering that it needs to wait on A before executing.
	 *
	 * Note the strong ordering of the timeline also provides consistent
	 * nesting rules for the fence->lock; the inner lock is always the
	 * older lock.
	 */
	spin_lock_irqsave(fence->lock, flags);
	prev = xchg(__active_fence_slot(active), fence);
	if (prev) {
		GEM_BUG_ON(prev == fence);
		spin_lock_nested(prev->lock, SINGLE_DEPTH_NESTING);
		__list_del_entry(&active->cb.node);
		spin_unlock(prev->lock); /* serialise with prev->cb_list */
	}
	GEM_BUG_ON(rcu_access_pointer(active->fence) != fence);
	list_add_tail(&active->cb.node, &fence->cb_list);
	spin_unlock_irqrestore(fence->lock, flags);

	return prev;
}

int i915_active_fence_set(struct i915_active_fence *active,
			  struct i915_request *rq)
{
	struct dma_fence *fence;
	int err = 0;

	/* Must maintain timeline ordering wrt previous active requests */
	rcu_read_lock();
	fence = __i915_active_fence_set(active, &rq->fence);
	if (fence) /* but the previous fence may not belong to that timeline! */
		fence = dma_fence_get_rcu(fence);
	rcu_read_unlock();
	if (fence) {
		err = i915_request_await_dma_fence(rq, fence);
		dma_fence_put(fence);
	}

	return err;
}

*fence
, struct dma_fence_cb
*cb
)
841 active_fence_cb(fence
, cb
);
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_active.c"
#endif

static void i915_global_active_shrink(void)
{
	kmem_cache_shrink(global.slab_cache);
}

static void i915_global_active_exit(void)
{
	kmem_cache_destroy(global.slab_cache);
}

static struct i915_global_active global = { {
	.shrink = i915_global_active_shrink,
	.exit = i915_global_active_exit,
} };

int __init i915_global_active_init(void)
{
	global.slab_cache = KMEM_CACHE(active_node, SLAB_HWCACHE_ALIGN);
	if (!global.slab_cache)
		return -ENOMEM;

	i915_global_register(&global.base);
	return 0;
}