// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/vmalloc.h>
#include <linux/sched/mm.h>

#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_gpu_trace.h"

/* Disabled by default for now, until it has had more testing on the different
 * iommu combinations that can be paired with the driver:
 */
static bool enable_eviction = false;
MODULE_PARM_DESC(enable_eviction, "Enable swappable GEM buffers");
module_param(enable_eviction, bool, 0600);

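/*
 * Eviction to swap only makes sense when the module param allows it and
 * there is actually swap space available.
 */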
static bool can_swap(void)
{
    return enable_eviction && get_nr_swap_pages() > 0;
}

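/*
 * Shrinker "count" callback: potentially reclaimable objects are those that
 * can be purged, plus (when swap is usable) those that can be evicted.
 */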
static unsigned long
msm_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
    struct msm_drm_private *priv =
        container_of(shrinker, struct msm_drm_private, shrinker);
    unsigned count = priv->shrinkable_count;

    if (can_swap())
        count += priv->evictable_count;

    return count;
}

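/* Purge the object's backing pages if it is currently purgeable. */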
static bool
purge(struct msm_gem_object *msm_obj)
{
    if (!is_purgeable(msm_obj))
        return false;

    /*
     * This will move the obj out of still_in_list to
     * the purged list
     */
    msm_gem_purge(&msm_obj->base);

    return true;
}

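/* Evict the object's backing pages unless the object is unevictable. */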
static bool
evict(struct msm_gem_object *msm_obj)
{
    if (is_unevictable(msm_obj))
        return false;

    msm_gem_evict(&msm_obj->base);

    return true;
}

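/*
 * Walk @list under mm_lock, applying @shrink to each object until @nr_to_scan
 * pages have been reclaimed or the list is exhausted.  Objects are moved to a
 * local list while being processed and spliced back afterwards, so that the
 * lock can be dropped around the actual shrink operation.  Returns the number
 * of pages reclaimed.
 */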
static unsigned long
scan(struct msm_drm_private *priv, unsigned nr_to_scan, struct list_head *list,
        bool (*shrink)(struct msm_gem_object *msm_obj))
{
    unsigned freed = 0;
    struct list_head still_in_list;

    INIT_LIST_HEAD(&still_in_list);

    mutex_lock(&priv->mm_lock);

    while (freed < nr_to_scan) {
        struct msm_gem_object *msm_obj = list_first_entry_or_null(
                list, typeof(*msm_obj), mm_list);

        if (!msm_obj)
            break;

        list_move_tail(&msm_obj->mm_list, &still_in_list);

        /*
         * If it is in the process of being freed, msm_gem_free_object
         * can be blocked on mm_lock waiting to remove it.  So just
         * skip it.
         */
        if (!kref_get_unless_zero(&msm_obj->base.refcount))
            continue;

        /*
         * Now that we own a reference, we can drop mm_lock for the
         * rest of the loop body, to reduce contention with the
         * retire_submit path (which could make more objects purgeable)
         */

        mutex_unlock(&priv->mm_lock);

        /*
         * Note that this still needs to be a trylock, since we can
         * hit the shrinker in response to trying to get backing pages
         * for this obj (ie. while its lock is already held)
         */
        if (!msm_gem_trylock(&msm_obj->base))
            goto tail;

        if (shrink(msm_obj))
            freed += msm_obj->base.size >> PAGE_SHIFT;

        msm_gem_unlock(&msm_obj->base);

tail:
        drm_gem_object_put(&msm_obj->base);
        mutex_lock(&priv->mm_lock);
    }

    list_splice_tail(&still_in_list, list);
    mutex_unlock(&priv->mm_lock);

    return freed;
}

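/*
 * Shrinker "scan" callback: first purge objects marked don't-need, then, if
 * swap is usable and more reclaim is wanted, evict will-need objects.
 */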
static unsigned long
msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
    struct msm_drm_private *priv =
        container_of(shrinker, struct msm_drm_private, shrinker);
    unsigned long freed;

    freed = scan(priv, sc->nr_to_scan, &priv->inactive_dontneed, purge);

    if (freed > 0)
        trace_msm_gem_purge(freed << PAGE_SHIFT);

    if (can_swap() && freed < sc->nr_to_scan) {
        int evicted = scan(priv, sc->nr_to_scan - freed,
                &priv->inactive_willneed, evict);

        if (evicted > 0)
            trace_msm_gem_evict(evicted << PAGE_SHIFT);

        freed += evicted;
    }

    return (freed > 0) ? freed : SHRINK_STOP;
}

#ifdef CONFIG_DEBUG_FS
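/*
 * Debugfs-only helper to force a shrinker pass, wrapped in fs_reclaim
 * annotations so lockdep treats it like real reclaim context.
 */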
unsigned long
msm_gem_shrinker_shrink(struct drm_device *dev, unsigned long nr_to_scan)
{
    struct msm_drm_private *priv = dev->dev_private;
    struct shrink_control sc = {
        .nr_to_scan = nr_to_scan,
    };
    int ret;

    fs_reclaim_acquire(GFP_KERNEL);
    ret = msm_gem_shrinker_scan(&priv->shrinker, &sc);
    fs_reclaim_release(GFP_KERNEL);

    return ret;
}
#endif

/* Since we don't know any better, let's bail after unmapping a few objects;
 * if necessary the shrinker will be invoked again.  That seems better than
 * unmapping *everything*.
 */
static const int vmap_shrink_limit = 15;

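/* Drop the object's kernel virtual mapping if it exists and is unused. */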
static bool
vmap_shrink(struct msm_gem_object *msm_obj)
{
    if (!is_vunmapable(msm_obj))
        return false;

    msm_gem_vunmap(&msm_obj->base);

    return true;
}

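/*
 * vmap purge notifier: called when vmalloc space is running low.  Walk the
 * inactive lists (and the GPU active list, if a GPU is present), unmapping
 * up to vmap_shrink_limit objects, and report the total back via @ptr.
 */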
static int
msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
{
    struct msm_drm_private *priv =
        container_of(nb, struct msm_drm_private, vmap_notifier);
    struct list_head *mm_lists[] = {
        &priv->inactive_dontneed,
        &priv->inactive_willneed,
        priv->gpu ? &priv->gpu->active_list : NULL,
        NULL,
    };
    unsigned idx, unmapped = 0;

    for (idx = 0; mm_lists[idx] && unmapped < vmap_shrink_limit; idx++) {
        unmapped += scan(priv, vmap_shrink_limit - unmapped,
                mm_lists[idx], vmap_shrink);
    }

    *(unsigned long *)ptr += unmapped;

    if (unmapped > 0)
        trace_msm_gem_purge_vmaps(unmapped);

    return NOTIFY_DONE;
}

/**
 * msm_gem_shrinker_init - Initialize msm shrinker
 * @dev: drm device
 *
 * This function registers and sets up the msm shrinker.
 */
void msm_gem_shrinker_init(struct drm_device *dev)
{
    struct msm_drm_private *priv = dev->dev_private;
    priv->shrinker.count_objects = msm_gem_shrinker_count;
    priv->shrinker.scan_objects = msm_gem_shrinker_scan;
    priv->shrinker.seeks = DEFAULT_SEEKS;
    WARN_ON(register_shrinker(&priv->shrinker, "drm-msm_gem"));

    priv->vmap_notifier.notifier_call = msm_gem_shrinker_vmap;
    WARN_ON(register_vmap_purge_notifier(&priv->vmap_notifier));
}

/**
 * msm_gem_shrinker_cleanup - Clean up msm shrinker
 * @dev: drm device
 *
 * This function unregisters the msm shrinker.
 */
void msm_gem_shrinker_cleanup(struct drm_device *dev)
{
    struct msm_drm_private *priv = dev->dev_private;

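    /* nr_deferred is only allocated if register_shrinker() succeeded. */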
    if (priv->shrinker.nr_deferred) {
        WARN_ON(unregister_vmap_purge_notifier(&priv->vmap_notifier));
        unregister_shrinker(&priv->shrinker);
    }
}