drivers/gpu/drm/msm/msm_gem_shrinker.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */
#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_gpu_trace.h"
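
/*
 * Shrinker "count" callback: called by the core shrinker to estimate how
 * many pages could be reclaimed.  Walks the inactive_dontneed list and sums
 * the size (in pages) of objects that are currently purgeable; objects whose
 * lock cannot be taken without blocking are simply skipped.
 */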
static unsigned long
msm_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct msm_drm_private *priv =
		container_of(shrinker, struct msm_drm_private, shrinker);
	struct msm_gem_object *msm_obj;
	unsigned long count = 0;

	mutex_lock(&priv->mm_lock);

	list_for_each_entry(msm_obj, &priv->inactive_dontneed, mm_list) {
		if (!msm_gem_trylock(&msm_obj->base))
			continue;
		if (is_purgeable(msm_obj))
			count += msm_obj->base.size >> PAGE_SHIFT;
		msm_gem_unlock(&msm_obj->base);
	}

	mutex_unlock(&priv->mm_lock);

	return count;
}
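
/*
 * Shrinker "scan" callback: called when the core shrinker actually wants
 * memory back.  Purges purgeable objects from the inactive_dontneed list
 * until sc->nr_to_scan pages have been freed or the list is exhausted, and
 * returns the number of pages actually reclaimed.
 */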
static unsigned long
msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct msm_drm_private *priv =
		container_of(shrinker, struct msm_drm_private, shrinker);
	struct msm_gem_object *msm_obj;
	unsigned long freed = 0;

	mutex_lock(&priv->mm_lock);

	list_for_each_entry(msm_obj, &priv->inactive_dontneed, mm_list) {
		if (freed >= sc->nr_to_scan)
			break;
		if (!msm_gem_trylock(&msm_obj->base))
			continue;
		if (is_purgeable(msm_obj)) {
			msm_gem_purge(&msm_obj->base);
			freed += msm_obj->base.size >> PAGE_SHIFT;
		}
		msm_gem_unlock(&msm_obj->base);
	}

	mutex_unlock(&priv->mm_lock);

	if (freed > 0)
		trace_msm_gem_purge(freed << PAGE_SHIFT);

	return freed;
}
/* Since we don't know any better, let's bail after a few
 * and if necessary the shrinker will be invoked again.
 * Seems better than unmapping *everything*.
 */
static const int vmap_shrink_limit = 15;
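
/*
 * Helper for the vmap purge notifier: walk one mm_list and vunmap objects
 * that are safe to vunmap, bailing out after roughly vmap_shrink_limit
 * objects so that a single notifier call does not unmap everything.
 */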
static unsigned
vmap_shrink(struct list_head *mm_list)
{
	struct msm_gem_object *msm_obj;
	unsigned unmapped = 0;

	list_for_each_entry(msm_obj, mm_list, mm_list) {
		if (!msm_gem_trylock(&msm_obj->base))
			continue;
		if (is_vunmapable(msm_obj)) {
			msm_gem_vunmap(&msm_obj->base);
			unmapped++;
		}
		msm_gem_unlock(&msm_obj->base);

		if (++unmapped >= vmap_shrink_limit)
			break;
	}

	return unmapped;
}
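
/*
 * vmap purge notifier: invoked when the kernel is running out of vmalloc
 * address space.  Walks the inactive (and, if a GPU is present, active)
 * object lists, vunmaps up to vmap_shrink_limit buffers, and reports the
 * number of buffers unmapped back through the notifier's data pointer.
 */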
static int
msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
{
	struct msm_drm_private *priv =
		container_of(nb, struct msm_drm_private, vmap_notifier);
	struct list_head *mm_lists[] = {
		&priv->inactive_dontneed,
		&priv->inactive_willneed,
		priv->gpu ? &priv->gpu->active_list : NULL,
		NULL,
	};
	unsigned idx, unmapped = 0;

	mutex_lock(&priv->mm_lock);

	for (idx = 0; mm_lists[idx]; idx++) {
		unmapped += vmap_shrink(mm_lists[idx]);

		if (unmapped >= vmap_shrink_limit)
			break;
	}

	mutex_unlock(&priv->mm_lock);

	*(unsigned long *)ptr += unmapped;

	if (unmapped > 0)
		trace_msm_gem_purge_vmaps(unmapped);

	return NOTIFY_DONE;
}
/**
 * msm_gem_shrinker_init - Initialize msm shrinker
 * @dev: drm device
 *
 * This function registers and sets up the msm shrinker.
 */
void msm_gem_shrinker_init(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	priv->shrinker.count_objects = msm_gem_shrinker_count;
	priv->shrinker.scan_objects = msm_gem_shrinker_scan;
	priv->shrinker.seeks = DEFAULT_SEEKS;
	WARN_ON(register_shrinker(&priv->shrinker));

	priv->vmap_notifier.notifier_call = msm_gem_shrinker_vmap;
	WARN_ON(register_vmap_purge_notifier(&priv->vmap_notifier));
}
/**
 * msm_gem_shrinker_cleanup - Clean up msm shrinker
 * @dev: drm device
 *
 * This function unregisters the msm shrinker.
 */
void msm_gem_shrinker_cleanup(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;

	if (priv->shrinker.nr_deferred) {
		WARN_ON(unregister_vmap_purge_notifier(&priv->vmap_notifier));
		unregister_shrinker(&priv->shrinker);
	}
}
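
/*
 * Usage sketch (not part of this file): the driver core is expected to call
 * the two entry points above once per device, after priv->mm_lock and the
 * inactive lists have been set up.  In this kernel version the call sites
 * are expected to live in msm_drv.c; "ddev" below is just a placeholder for
 * the driver's struct drm_device pointer.
 *
 *	// during driver load:
 *	msm_gem_shrinker_init(ddev);
 *
 *	// during driver teardown, before priv is freed:
 *	msm_gem_shrinker_cleanup(ddev);
 */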