// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include "msm_drv.h"
#include "msm_gem.h"
static bool msm_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
{
	/* NOTE: we are *closer* to being able to get rid of
	 * mutex_trylock_recursive().. the msm_gem code itself does
	 * not need struct_mutex, although codepaths that can trigger
	 * the shrinker are still called in code-paths that hold the
	 * struct_mutex.
	 *
	 * Also, msm_obj->madv is protected by struct_mutex.
	 *
	 * The next step is probably to split out a separate lock for
	 * protecting inactive_list, so that the shrinker does not need
	 * struct_mutex.
	 */
	switch (mutex_trylock_recursive(&dev->struct_mutex)) {
	case MUTEX_TRYLOCK_FAILED:
		return false;
	case MUTEX_TRYLOCK_SUCCESS:
		*unlock = true;
		return true;
	case MUTEX_TRYLOCK_RECURSIVE:
		*unlock = false;
		return true;
	}

	BUG();
}
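/*
 * Illustrative sketch only (mirroring the count/scan/vmap callbacks
 * below), showing how this helper is meant to be used; it is not an
 * additional code path:
 *
 *	bool unlock;
 *
 *	if (!msm_gem_shrinker_lock(dev, &unlock))
 *		return 0;
 *
 *	list_for_each_entry(msm_obj, &priv->inactive_list, mm_list)
 *		... inspect or reclaim msm_obj under struct_mutex ...
 *
 *	if (unlock)
 *		mutex_unlock(&dev->struct_mutex);
 *
 * In the recursive case struct_mutex was already held by the current
 * context, so *unlock is false and the caller must not drop it.
 */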
static unsigned long
msm_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct msm_drm_private *priv =
		container_of(shrinker, struct msm_drm_private, shrinker);
	struct drm_device *dev = priv->dev;
	struct msm_gem_object *msm_obj;
	unsigned long count = 0;
	bool unlock;

	if (!msm_gem_shrinker_lock(dev, &unlock))
		return 0;

	list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
		if (is_purgeable(msm_obj))
			count += msm_obj->base.size >> PAGE_SHIFT;
	}

	if (unlock)
		mutex_unlock(&dev->struct_mutex);

	return count;
}
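/*
 * For context: the core shrinker calls ->count_objects() (above) to ask
 * how much is reclaimable, then ->scan_objects() (below) with
 * sc->nr_to_scan set to the batch it wants back.  Both are expressed in
 * pages, hence the ">> PAGE_SHIFT" on object sizes.
 */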
static unsigned long
msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct msm_drm_private *priv =
		container_of(shrinker, struct msm_drm_private, shrinker);
	struct drm_device *dev = priv->dev;
	struct msm_gem_object *msm_obj;
	unsigned long freed = 0;
	bool unlock;

	if (!msm_gem_shrinker_lock(dev, &unlock))
		return SHRINK_STOP;

	list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
		if (freed >= sc->nr_to_scan)
			break;
		if (is_purgeable(msm_obj)) {
			msm_gem_purge(&msm_obj->base, OBJ_LOCK_SHRINKER);
			freed += msm_obj->base.size >> PAGE_SHIFT;
		}
	}

	if (unlock)
		mutex_unlock(&dev->struct_mutex);

	if (freed > 0)
		pr_info_ratelimited("Purging %lu bytes\n", freed << PAGE_SHIFT);

	return freed;
}
static int
msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
{
	struct msm_drm_private *priv =
		container_of(nb, struct msm_drm_private, vmap_notifier);
	struct drm_device *dev = priv->dev;
	struct msm_gem_object *msm_obj;
	unsigned unmapped = 0;
	bool unlock;

	if (!msm_gem_shrinker_lock(dev, &unlock))
		return NOTIFY_DONE;

	list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
		if (is_vunmapable(msm_obj)) {
			msm_gem_vunmap(&msm_obj->base, OBJ_LOCK_SHRINKER);
			/* since we don't know any better, let's bail after a few
			 * and if necessary the shrinker will be invoked again.
			 * Seems better than unmapping *everything*.
			 */
			if (++unmapped >= 15)
				break;
		}
	}

	if (unlock)
		mutex_unlock(&dev->struct_mutex);

	*(unsigned long *)ptr += unmapped;

	if (unmapped > 0)
		pr_info_ratelimited("Purging %u vmaps\n", unmapped);

	return NOTIFY_DONE;
}
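/*
 * Note: the handler above is not driven by the shrinker but by the vmap
 * purge notifier chain, which the core mm fires when it cannot find free
 * vmalloc address space.  ptr is the chain's running count of purged
 * mappings; adding "unmapped" to it tells the core whether retrying the
 * vmalloc allocation is worthwhile.
 */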
/**
 * msm_gem_shrinker_init - Initialize msm shrinker
 * @dev: drm device
 *
 * This function registers and sets up the msm shrinker.
 */
void msm_gem_shrinker_init(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;

	priv->shrinker.count_objects = msm_gem_shrinker_count;
	priv->shrinker.scan_objects = msm_gem_shrinker_scan;
	priv->shrinker.seeks = DEFAULT_SEEKS;
	WARN_ON(register_shrinker(&priv->shrinker));

	priv->vmap_notifier.notifier_call = msm_gem_shrinker_vmap;
	WARN_ON(register_vmap_purge_notifier(&priv->vmap_notifier));
}
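/*
 * Registration failures above are only WARN_ON()'d rather than propagated:
 * the driver can still operate without a shrinker, memory-pressure handling
 * just becomes less effective.
 */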
/**
 * msm_gem_shrinker_cleanup - Clean up msm shrinker
 * @dev: drm device
 *
 * This function unregisters the msm shrinker.
 */
void msm_gem_shrinker_cleanup(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;

	/* nr_deferred is only allocated by a successful register_shrinker() */
	if (priv->shrinker.nr_deferred) {
		WARN_ON(unregister_vmap_purge_notifier(&priv->vmap_notifier));
		unregister_shrinker(&priv->shrinker);
	}
}