/* drivers/gpu/drm/i915/i915_globals.c */

/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include <linux/slab.h>
#include <linux/workqueue.h>

#include "i915_active.h"
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_object.h"
#include "i915_globals.h"
#include "i915_request.h"
#include "i915_scheduler.h"
#include "i915_vma.h"
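
/*
 * Registry of every global slab cache in the driver. It is populated only
 * from __init constructors via i915_global_register() and is walked without
 * locking thereafter.
 */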
static LIST_HEAD(globals);

static atomic_t active;
static atomic_t epoch;
static struct park_work {
	struct delayed_work work;
	struct rcu_head rcu;
	unsigned long flags;
#define PENDING 0
	int epoch;
} park;
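
/*
 * Shrinking the caches is deferred until the GPU has idled for a full RCU
 * grace period: "active" counts unparked users, "epoch" advances whenever
 * the idle state changes so a stale park request can be detected, and the
 * PENDING bit ensures only one request is in flight at a time.
 */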
static void i915_globals_shrink(void)
{
	struct i915_global *global;

	/*
	 * kmem_cache_shrink() discards empty slabs and reorders partially
	 * filled slabs to prioritise allocating from the mostly full slabs,
	 * with the aim of reducing fragmentation.
	 */
	list_for_each_entry(global, &globals, link)
		global->shrink();
}

static void __i915_globals_grace(struct rcu_head *rcu)
{
	/* Ratelimit parking as shrinking is quite slow */
	schedule_delayed_work(&park.work, round_jiffies_up_relative(2 * HZ));
}
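
/*
 * Record a new epoch and, if we are still idle, ask for a callback once an
 * RCU grace period has elapsed; if the GPU woke up in the meantime, the
 * request is dropped.
 */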
static void __i915_globals_queue_rcu(void)
{
	park.epoch = atomic_inc_return(&epoch);
	if (!atomic_read(&active)) {
		init_rcu_head(&park.rcu);
		call_rcu(&park.rcu, __i915_globals_grace);
	}
}
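
/*
 * Delayed-work handler: by the time this runs, at least one RCU grace
 * period plus the ratelimit delay have passed since parking was requested.
 */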
static void __i915_globals_park(struct work_struct *work)
{
	destroy_rcu_head(&park.rcu);

	/* Confirm nothing woke up in the last grace period */
	if (park.epoch != atomic_read(&epoch)) {
		__i915_globals_queue_rcu();
		return;
	}

	clear_bit(PENDING, &park.flags);
	i915_globals_shrink();
}
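
/*
 * i915_global_register - add a cache to the global shrink/exit lists
 *
 * Each user embeds a struct i915_global next to its kmem_cache pointers and
 * registers it from its own __init constructor. As a rough sketch of the
 * pattern (the "foo" names below are made up for illustration, not taken
 * from the driver):
 *
 *	static struct i915_global_foo {
 *		struct i915_global base;
 *		struct kmem_cache *slab_foo;
 *	} global;
 *
 *	static void i915_global_foo_shrink(void)
 *	{
 *		kmem_cache_shrink(global.slab_foo);
 *	}
 *
 *	static void i915_global_foo_exit(void)
 *	{
 *		kmem_cache_destroy(global.slab_foo);
 *	}
 *
 *	static struct i915_global_foo global = { {
 *		.shrink = i915_global_foo_shrink,
 *		.exit = i915_global_foo_exit,
 *	} };
 *
 *	int __init i915_global_foo_init(void)
 *	{
 *		// 'struct i915_foo' is a hypothetical cached type
 *		global.slab_foo = KMEM_CACHE(i915_foo, 0);
 *		if (!global.slab_foo)
 *			return -ENOMEM;
 *
 *		i915_global_register(&global.base);
 *		return 0;
 *	}
 */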
void __init i915_global_register(struct i915_global *global)
{
	GEM_BUG_ON(!global->shrink);
	GEM_BUG_ON(!global->exit);

	list_add_tail(&global->link, &globals);
}
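
/* Tear the caches down in the reverse order of construction */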
static void __i915_globals_cleanup(void)
{
	struct i915_global *global, *next;

	list_for_each_entry_safe_reverse(global, next, &globals, link)
		global->exit();
}
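
/* Constructors for each global cache, invoked in order at module load */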
static __initconst int (* const initfn[])(void) = {
	i915_global_active_init,
	i915_global_buddy_init,
	i915_global_context_init,
	i915_global_gem_context_init,
	i915_global_objects_init,
	i915_global_request_init,
	i915_global_scheduler_init,
	i915_global_vma_init,
};
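
/*
 * Construct every global cache in turn; on failure, unwind whatever has
 * already been registered so the caller sees no partial state.
 */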
int __init i915_globals_init(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(initfn); i++) {
		int err;

		err = initfn[i]();
		if (err) {
			__i915_globals_cleanup();
			return err;
		}
	}

	INIT_DELAYED_WORK(&park.work, __i915_globals_park);
	return 0;
}
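
/*
 * Called by the GT power management code when the GPU parks (idles);
 * pairs with i915_globals_unpark() below.
 */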
void i915_globals_park(void)
{
	/*
	 * Defer shrinking the global slab caches (and other work) until
	 * after an RCU grace period has completed with no activity. This
	 * is to try to reduce the latency impact on the consumers caused
	 * by us shrinking the caches at the same time as they are trying
	 * to allocate, with the assumption being that if we idle long
	 * enough for an RCU grace period to elapse since the last use, it
	 * is likely to be longer until we need the caches again.
	 */
	if (!atomic_dec_and_test(&active))
		return;

	/* Queue cleanup after the next RCU grace period has freed slabs */
	if (!test_and_set_bit(PENDING, &park.flags))
		__i915_globals_queue_rcu();
}
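
/*
 * Mark the GPU as busy again: advancing the epoch tells a park request
 * already past its grace period to start over, and raising the active
 * count stops a new callback from being armed.
 */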
void i915_globals_unpark(void)
{
	atomic_inc(&epoch);
	atomic_inc(&active);
}
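
/*
 * Make sure any park request still in flight has run to completion before
 * the caches are destroyed.
 */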
static void __exit __i915_globals_flush(void)
{
	atomic_inc(&active); /* skip shrinking */

	rcu_barrier(); /* wait for the work to be queued */
	flush_delayed_work(&park.work);

	atomic_dec(&active);
}

void __exit i915_globals_exit(void)
{
	GEM_BUG_ON(atomic_read(&active));

	__i915_globals_flush();
	__i915_globals_cleanup();

	/* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */
	rcu_barrier();
}