path: root/drivers/gpu/drm/msm/msm_gem_shrinker.c
author     Rob Clark <robdclark@chromium.org>  2022-08-02 17:51:43 +0200
committer  Rob Clark <robdclark@chromium.org>  2022-08-27 18:32:45 +0200
commit     b352ba54a82072614f44c5ea352b5ea6e7f7b4c0 (patch)
tree       483719eba02232bdcc0f51a47b650ab29f1f8e50 /drivers/gpu/drm/msm/msm_gem_shrinker.c
parent     drm/gem: Add LRU/shrinker helper (diff)
drm/msm/gem: Convert to using drm_gem_lru
This converts over to use the shared GEM LRU/shrinker helpers. Note that it means we are no longer tracking purgeable or willneed buffers that are active separately. But the most recently pinned buffers should be at the tail of the various LRUs, and the shrinker is already prepared to encounter objects which are still active.

Signed-off-by: Rob Clark <robdclark@chromium.org>
Patchwork: https://patchwork.freedesktop.org/patch/496131/
Link: https://lore.kernel.org/r/20220802155152.1727594-11-robdclark@gmail.com
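For context, here is a minimal sketch (not part of this commit) of the drm_gem_lru pattern being adopted: per-state LRUs sharing one lock, with the shrinker delegating its scan loop to drm_gem_lru_scan(). Helper signatures follow the parent "drm/gem: Add LRU/shrinker helper" commit as used in this diff (later kernels extend drm_gem_lru_scan() with an extra argument); the my_* struct and function names are illustrative, not msm code.

/*
 * Illustrative sketch only: per-state drm_gem_lru lists plus a shrinker
 * scan that delegates the LRU walk to the shared helper.
 */
#include <linux/mutex.h>
#include <drm/drm_gem.h>

struct my_drm_private {
	struct mutex lru_lock;			/* one lock shared by all LRUs */
	struct {
		struct drm_gem_lru dontneed;	/* purgeable: backing can be dropped */
		struct drm_gem_lru willneed;	/* evictable to swap when swap is available */
		struct drm_gem_lru pinned;	/* pinned/vmap'd: at most vunmap-able */
	} lru;
};

static void my_lru_init(struct my_drm_private *priv)
{
	mutex_init(&priv->lru_lock);
	drm_gem_lru_init(&priv->lru.dontneed, &priv->lru_lock);
	drm_gem_lru_init(&priv->lru.willneed, &priv->lru_lock);
	drm_gem_lru_init(&priv->lru.pinned, &priv->lru_lock);
	/*
	 * As an object's madvise/pin state changes, the driver moves it to
	 * the matching LRU with drm_gem_lru_move_tail(lru, obj); the shrinker
	 * count callback can then simply sum the per-LRU counts.
	 */
}

/* Callback for drm_gem_lru_scan(); return true if the object was shrunk. */
static bool my_purge(struct drm_gem_object *obj)
{
	/* driver-specific: check state and drop purgeable backing pages */
	return true;
}

static unsigned long my_shrinker_scan(struct my_drm_private *priv,
				      unsigned long nr_to_scan)
{
	/*
	 * drm_gem_lru_scan() walks the LRU oldest-first, taking a reference
	 * and trylocking each object before invoking the callback - the
	 * boilerplate the open-coded scan() removed below used to do by hand.
	 */
	return drm_gem_lru_scan(&priv->lru.dontneed, nr_to_scan, my_purge);
}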
Diffstat (limited to 'drivers/gpu/drm/msm/msm_gem_shrinker.c')
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_shrinker.c  117
1 file changed, 29 insertions, 88 deletions
diff --git a/drivers/gpu/drm/msm/msm_gem_shrinker.c b/drivers/gpu/drm/msm/msm_gem_shrinker.c
index 0ab058734777..f1e52410f521 100644
--- a/drivers/gpu/drm/msm/msm_gem_shrinker.c
+++ b/drivers/gpu/drm/msm/msm_gem_shrinker.c
@@ -29,121 +29,61 @@ msm_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
struct msm_drm_private *priv =
container_of(shrinker, struct msm_drm_private, shrinker);
- unsigned count = priv->shrinkable_count;
+ unsigned count = priv->lru.dontneed.count;
if (can_swap())
- count += priv->evictable_count;
+ count += priv->lru.willneed.count;
return count;
}
static bool
-purge(struct msm_gem_object *msm_obj)
+purge(struct drm_gem_object *obj)
{
- if (!is_purgeable(msm_obj))
+ if (!is_purgeable(to_msm_bo(obj)))
return false;
- if (msm_gem_active(&msm_obj->base))
+ if (msm_gem_active(obj))
return false;
- /*
- * This will move the obj out of still_in_list to
- * the purged list
- */
- msm_gem_purge(&msm_obj->base);
+ msm_gem_purge(obj);
return true;
}
static bool
-evict(struct msm_gem_object *msm_obj)
+evict(struct drm_gem_object *obj)
{
- if (is_unevictable(msm_obj))
+ if (is_unevictable(to_msm_bo(obj)))
return false;
- if (msm_gem_active(&msm_obj->base))
+ if (msm_gem_active(obj))
return false;
- msm_gem_evict(&msm_obj->base);
+ msm_gem_evict(obj);
return true;
}
static unsigned long
-scan(struct msm_drm_private *priv, unsigned nr_to_scan, struct list_head *list,
- bool (*shrink)(struct msm_gem_object *msm_obj))
-{
- unsigned freed = 0;
- struct list_head still_in_list;
-
- INIT_LIST_HEAD(&still_in_list);
-
- mutex_lock(&priv->mm_lock);
-
- while (freed < nr_to_scan) {
- struct msm_gem_object *msm_obj = list_first_entry_or_null(
- list, typeof(*msm_obj), mm_list);
-
- if (!msm_obj)
- break;
-
- list_move_tail(&msm_obj->mm_list, &still_in_list);
-
- /*
- * If it is in the process of being freed, msm_gem_free_object
- * can be blocked on mm_lock waiting to remove it. So just
- * skip it.
- */
- if (!kref_get_unless_zero(&msm_obj->base.refcount))
- continue;
-
- /*
- * Now that we own a reference, we can drop mm_lock for the
- * rest of the loop body, to reduce contention with the
- * retire_submit path (which could make more objects purgeable)
- */
-
- mutex_unlock(&priv->mm_lock);
-
- /*
- * Note that this still needs to be trylock, since we can
- * hit shrinker in response to trying to get backing pages
- * for this obj (ie. while it's lock is already held)
- */
- if (!msm_gem_trylock(&msm_obj->base))
- goto tail;
-
- if (shrink(msm_obj))
- freed += msm_obj->base.size >> PAGE_SHIFT;
-
- msm_gem_unlock(&msm_obj->base);
-
-tail:
- drm_gem_object_put(&msm_obj->base);
- mutex_lock(&priv->mm_lock);
- }
-
- list_splice_tail(&still_in_list, list);
- mutex_unlock(&priv->mm_lock);
-
- return freed;
-}
-
-static unsigned long
msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
struct msm_drm_private *priv =
container_of(shrinker, struct msm_drm_private, shrinker);
+ long nr = sc->nr_to_scan;
unsigned long freed;
- freed = scan(priv, sc->nr_to_scan, &priv->inactive_dontneed, purge);
+ freed = drm_gem_lru_scan(&priv->lru.dontneed, nr, purge);
+ nr -= freed;
if (freed > 0)
trace_msm_gem_purge(freed << PAGE_SHIFT);
- if (can_swap() && freed < sc->nr_to_scan) {
- int evicted = scan(priv, sc->nr_to_scan - freed,
- &priv->inactive_willneed, evict);
+ if (can_swap() && nr > 0) {
+ unsigned long evicted;
+
+ evicted = drm_gem_lru_scan(&priv->lru.willneed, nr, evict);
+ nr -= evicted;
if (evicted > 0)
trace_msm_gem_evict(evicted << PAGE_SHIFT);
@@ -179,12 +119,12 @@ msm_gem_shrinker_shrink(struct drm_device *dev, unsigned long nr_to_scan)
static const int vmap_shrink_limit = 15;
static bool
-vmap_shrink(struct msm_gem_object *msm_obj)
+vmap_shrink(struct drm_gem_object *obj)
{
- if (!is_vunmapable(msm_obj))
+ if (!is_vunmapable(to_msm_bo(obj)))
return false;
- msm_gem_vunmap(&msm_obj->base);
+ msm_gem_vunmap(obj);
return true;
}
@@ -194,17 +134,18 @@ msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
{
struct msm_drm_private *priv =
container_of(nb, struct msm_drm_private, vmap_notifier);
- struct list_head *mm_lists[] = {
- &priv->inactive_dontneed,
- &priv->inactive_willneed,
- priv->gpu ? &priv->gpu->active_list : NULL,
+ struct drm_gem_lru *lrus[] = {
+ &priv->lru.dontneed,
+ &priv->lru.willneed,
+ &priv->lru.pinned,
NULL,
};
unsigned idx, unmapped = 0;
- for (idx = 0; mm_lists[idx] && unmapped < vmap_shrink_limit; idx++) {
- unmapped += scan(priv, vmap_shrink_limit - unmapped,
- mm_lists[idx], vmap_shrink);
+ for (idx = 0; lrus[idx] && unmapped < vmap_shrink_limit; idx++) {
+ unmapped += drm_gem_lru_scan(lrus[idx],
+ vmap_shrink_limit - unmapped,
+ vmap_shrink);
}
*(unsigned long *)ptr += unmapped;